src/hotspot/share/memory/memoryReserver.cpp

*** 501,11 ***
  // Create protection page at the beginning of the space.
  static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
    assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
    assert(reserved.is_reserved(), "should only be called on a reserved memory area");
  
!   if (reserved.end() > (char *)OopEncodingHeapMax) {
      if (true
          WIN64_ONLY(&& !UseLargePages)
          AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
        // Protect memory at the base of the allocated region.
        if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
--- 501,12 ---
  // Create protection page at the beginning of the space.
  static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
    assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
    assert(reserved.is_reserved(), "should only be called on a reserved memory area");
  
!   if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
+     assert((reserved.base() != nullptr), "sanity");
      if (true
          WIN64_ONLY(&& !UseLargePages)
          AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
        // Protect memory at the base of the allocated region.
        if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
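
For context on the hunk above: the no-access prefix makes the first bytes of the reservation inaccessible, so decoding a narrow null against a non-zero heap base faults instead of silently reading memory. Below is a minimal POSIX sketch of the same idea, using plain mmap/mprotect rather than the os:: wrappers; the page count and sizes are made up for the demo.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const size_t page      = (size_t)sysconf(_SC_PAGESIZE);
  const size_t prefix    = page;       // no-access prefix; one page for the demo
  const size_t heap_size = 16 * page;  // stand-in for the Java heap

  // Reserve prefix + heap in one mapping (readable/writable for the demo).
  void* mem = mmap(nullptr, prefix + heap_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) { perror("mmap"); return 1; }
  char* base = (char*)mem;

  // Protect the prefix, mirroring the os::protect_memory(..., MEM_PROT_NONE, ...)
  // call above: any access into [base, base + prefix) now faults.
  if (mprotect(base, prefix, PROT_NONE) != 0) { perror("mprotect"); return 1; }

  // The usable heap starts after the prefix; touching base[0] would crash.
  char* heap_start = base + prefix;
  heap_start[0] = 42;
  printf("prefix protected at %p, heap usable from %p\n", (void*)base, (void*)heap_start);

  munmap(base, prefix + heap_size);
  return 0;
}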

*** 547,17 ***
      NOT_AIX(os::vm_allocation_granularity());
  
    const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
  
    char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
!   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
!     noaccess_prefix_size : 0;
  
    ReservedSpace reserved{};
  
    // Attempt to alloc at user-given address.
!   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
      if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
        release(reserved);
        reserved = {};
      }
--- 548,24 ---
      NOT_AIX(os::vm_allocation_granularity());
  
    const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
  
    char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
!   char* heap_end_address = aligned_heap_base_min_address + size;
! 
+   bool unscaled  = false;
+   bool zerobased = false;
+   if (!UseCompatibleCompressedOops) { // heap base is not enforced
+     unscaled  = (heap_end_address <= (char*)UnscaledOopHeapMax);
+     zerobased = (heap_end_address <= (char*)OopEncodingHeapMax);
+   }
+   size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;
  
    ReservedSpace reserved{};
  
    // Attempt to alloc at user-given address.
!   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
      if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
        release(reserved);
        reserved = {};
      }
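
The unscaled/zerobased flags introduced above drive the placement attempts in the following hunks. Here is a standalone sketch of that classification, with the two limits recomputed locally under the assumption of 8-byte object alignment (narrow-oop shift 3); force_base stands in for UseCompatibleCompressedOops, which disables both modes so a non-zero base and a no-access prefix are always used.

#include <cstdint>
#include <cstddef>
#include <cstdio>

// Limits recomputed locally (assumption: 8-byte object alignment, shift 3).
const uint64_t kUnscaledOopHeapMax = uint64_t(1) << 32;        // 4 GB: narrow oop == address
const uint64_t kOopEncodingHeapMax = kUnscaledOopHeapMax << 3; // 32 GB: shift only, base == 0

struct Mode { bool unscaled; bool zerobased; size_t noaccess_prefix; };

// heap_end: one past the last heap byte; force_base mirrors
// UseCompatibleCompressedOops (never rely on a zero base, so keep the prefix).
Mode classify(uint64_t heap_end, bool force_base, size_t prefix_size) {
  Mode m{false, false, 0};
  if (!force_base) {
    m.unscaled  = heap_end <= kUnscaledOopHeapMax;
    m.zerobased = heap_end <= kOopEncodingHeapMax;
  }
  m.noaccess_prefix = m.zerobased ? 0 : prefix_size;
  return m;
}

int main() {
  const uint64_t G = uint64_t(1) << 30;
  const uint64_t ends[] = {2 * G, 16 * G, 64 * G};
  for (uint64_t end : ends) {
    Mode m = classify(end, /*force_base=*/false, /*prefix_size=*/4096);
    printf("heap end %2llu GB: unscaled=%d zerobased=%d noaccess_prefix=%zu\n",
           (unsigned long long)(end / G), m.unscaled, m.zerobased, m.noaccess_prefix);
  }
  return 0;
}

With heap ends of 2 GB, 16 GB and 64 GB this prints unscaled+zerobased, zerobased only, and neither (prefix required), matching the three placement strategies tried below.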

*** 575,11 ***
      // address. try_reserve_heap() always returns this allocated memory, as only here
      // the criteria for a good heap are checked.
  
      // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
      // Give it several tries from top of range to bottom.
!     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
  
        // Calc address range within which we try to attach (range of possible start addresses).
        char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
        char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
        reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
--- 583,11 ---
      // address. try_reserve_heap() always returns this allocated memory, as only here
      // the criteria for a good heap are checked.
  
      // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
      // Give it several tries from top of range to bottom.
!     if (unscaled) {
  
        // Calc address range within which we try to attach (range of possible start addresses).
        char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
        char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
        reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
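
The unscaled attempt above (and the zerobased attempt in the next hunk) steps through candidate attach points from the top of the admissible range downwards. Below is a sketch of that loop with hypothetical helpers and made-up sizes; a real attempt would reserve at each candidate address and keep the reservation only if it landed exactly where requested, as try_reserve_range does.

#include <cstdint>
#include <cstdio>

static uint64_t align_down(uint64_t v, uint64_t a) { return v & ~(a - 1); }
static uint64_t align_up(uint64_t v, uint64_t a)   { return (v + a - 1) & ~(a - 1); }

// Stand-in for one reservation attempt at a fixed address; a real version
// would mmap with a hint and check the address it actually got back.
static bool try_reserve_at(uint64_t addr, uint64_t size) {
  (void)size;
  return addr < (uint64_t(1) << 30);  // pretend only addresses below 1 GB are free
}

int main() {
  const uint64_t size      = uint64_t(2) << 30;    // 2 GB heap (made up)
  const uint64_t limit     = uint64_t(1) << 32;    // UnscaledOopHeapMax analogue (4 GB)
  const uint64_t min_addr  = uint64_t(512) << 20;  // HeapBaseMinAddress analogue (made up)
  const uint64_t attach_pt = uint64_t(256) << 20;  // attach point alignment (made up)

  // Highest and lowest aligned start addresses that keep the heap inside the limit.
  const uint64_t highest_start = align_down(limit - size, attach_pt);
  const uint64_t lowest_start  = align_up(min_addr, attach_pt);

  // Walk from the top of the range down, stopping at the first address that works.
  for (uint64_t addr = highest_start; addr >= lowest_start; addr -= attach_pt) {
    if (try_reserve_at(addr, size)) {
      printf("attached at %#llx\n", (unsigned long long)addr);
      return 0;
    }
  }
  printf("no attach point in the range worked\n");
  return 0;
}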

*** 588,13 ***
  
      // zerobased: Attempt to allocate in the lower 32G.
      char *zerobased_max = (char *)OopEncodingHeapMax;
  
      // Give it several tries from top of range to bottom.
!     if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
!         ((!reserved.is_reserved()) ||                            // No previous try succeeded.
!          (reserved.end() > zerobased_max))) {                    // Unscaled delivered an arbitrary address.
  
        // Release previous reservation
        release(reserved);
  
        // Calc address range within which we try to attach (range of possible start addresses).
--- 596,13 ---
  
      // zerobased: Attempt to allocate in the lower 32G.
      char *zerobased_max = (char *)OopEncodingHeapMax;
  
      // Give it several tries from top of range to bottom.
!     if (zerobased &&                          // Zerobased theoretically possible.
!         ((!reserved.is_reserved()) ||         // No previous try succeeded.
!          (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.
  
        // Release previous reservation
        release(reserved);
  
        // Calc address range within which we try to attach (range of possible start addresses).
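
As background for why the attempts are ordered unscaled, then zerobased, then anywhere: each later placement adds work when a 32-bit narrow oop is decoded back into an address. A small sketch, assuming 8-byte object alignment (shift 3):

#include <cstdint>
#include <cstdio>

const unsigned kShift = 3;  // log2 of the assumed 8-byte object alignment

// Unscaled: heap ends below 4 GB, the 32-bit narrow oop is already the address.
uint64_t decode_unscaled(uint32_t narrow)  { return narrow; }

// Zero-based: heap ends below 32 GB, a shift suffices and the base stays zero.
uint64_t decode_zerobased(uint32_t narrow) { return uint64_t(narrow) << kShift; }

// Heap-based: arbitrary placement, non-zero base plus shift; this is the mode
// that needs the no-access prefix so a decoded narrow null still faults.
uint64_t decode_heapbased(uint64_t base, uint32_t narrow) {
  return base + (uint64_t(narrow) << kShift);
}

int main() {
  const uint32_t narrow = 0x1000;
  printf("unscaled:   %#llx\n", (unsigned long long)decode_unscaled(narrow));
  printf("zero-based: %#llx\n", (unsigned long long)decode_zerobased(narrow));
  printf("heap-based: %#llx\n",
         (unsigned long long)decode_heapbased(uint64_t(32) << 30, narrow));
  return 0;
}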

*** 656,10 ***
--- 664,11 ---
        assert(reserved.size() == size, "Prefix should be gone");
        return ReservedHeapSpace(reserved, noaccess_prefix);
      }
  
      // We reserved heap memory without a noaccess prefix.
+     assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
      return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
    }
  
    // Failed
    return {};
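
Finally, a hypothetical bookkeeping sketch (not HotSpot's actual ReservedHeapSpace) of the invariant asserted above: once a no-access prefix has been established, the space handed back starts past the prefix and measures exactly the requested size, so the prefix is "gone" from the usable range.

#include <cstddef>
#include <cstdio>

struct HeapReservation {
  char*  raw_base;   // start of the whole mapping (prefix + heap)
  size_t raw_size;   // no-access prefix plus requested heap size
  size_t prefix;     // prefix length, 0 when no prefix was needed
  char*  heap_base() const { return raw_base + prefix; }
  size_t heap_size() const { return raw_size - prefix; }
};

int main() {
  static char backing[4096 + 65536];  // stand-in for a real mapping
  HeapReservation r{backing, sizeof(backing), 4096};
  // The usable heap excludes the prefix and measures exactly the requested size.
  printf("heap at %p, usable size %zu\n", (void*)r.heap_base(), r.heap_size());
  return 0;
}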