src/hotspot/share/memory/memoryReserver.cpp

484   uint start = i;
485 
486   // Avoid more steps than requested.
487   i = 0;
488   while (addresses[start+i] != 0) {
489     if (i == HeapSearchSteps) {
490       addresses[start+i] = 0;
491       break;
492     }
493     i++;
494   }
495 
496   return (char**) &addresses[start];
497 }
498 
499 // Create protection page at the beginning of the space.
500 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
501   assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
502   assert(reserved.is_reserved(), "should only be called on a reserved memory area");
503 
504   if (reserved.end() > (char *)OopEncodingHeapMax) {

505     if (true
506         WIN64_ONLY(&& !UseLargePages)
507         AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
508       // Protect memory at the base of the allocated region.
509       if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
510         fatal("cannot protect protection page");
511       }
512       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
513                                  PTR_FORMAT " / %zd bytes",
514                                  p2i(reserved.base()),
515                                  noaccess_prefix);
516       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
517     } else {
518       CompressedOops::set_use_implicit_null_checks(false);
519     }
520   }
521 
522   return reserved.last_part(noaccess_prefix);
523 }
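
establish_noaccess_prefix() relies on the os layer to make the first noaccess_prefix bytes of the reservation inaccessible and then hands back only the remainder via last_part(). A minimal POSIX sketch of the same idea (assuming plain mmap/mprotect; this is not the HotSpot os:: API):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      const size_t page   = (size_t)sysconf(_SC_PAGESIZE);
      const size_t prefix = page;          // the no-access prefix, one page here
      const size_t usable = 16 * page;     // the part handed to the caller

      char* base = (char*)mmap(nullptr, prefix + usable, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) { perror("mmap"); return 1; }

      // Make the first page inaccessible; any access that lands in the prefix
      // now faults, which is what implicit null checks rely on.
      if (mprotect(base, prefix, PROT_NONE) != 0) { perror("mprotect"); return 1; }

      char* heap_start = base + prefix;    // analogous to reserved.last_part(noaccess_prefix)
      printf("usable region starts at %p\n", (void*)heap_start);

      munmap(base, prefix + usable);
      return 0;
    }
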
524 

530   assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
531 
532   assert(alignment >= os::vm_page_size(), "alignment too small");
533   assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
534   assert(is_power_of_2(alignment), "not a power of 2");
535 
536   // The necessary attach point alignment for generated wish addresses.
537   // This is needed to increase the chance of attaching for mmap and shmat.
538   // AIX is the only platform that uses System V shm for reserving virtual memory.
539   // In this case, the required alignment of the allocated size (64K) and the alignment
540   // of possible start points of the memory region (256M) differ.
541   // This is not reflected by os_allocation_granularity().
542   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
543   const size_t os_attach_point_alignment =
544     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
545     NOT_AIX(os::vm_allocation_granularity());
546 
547   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
548 
549   uintptr_t aligned_heap_base_min_address = align_up(MAX2(HeapBaseMinAddress, alignment), alignment);
550   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > OopEncodingHeapMax) ?
551     noaccess_prefix_size : 0;







552 
553   ReservedSpace reserved{};
554 
555   // Attempt to allocate at the user-given address.
556   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
557     reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, (char*)aligned_heap_base_min_address);
558     if (reserved.base() != (char*)aligned_heap_base_min_address) { // Enforce this exact address.
559       release(reserved);
560       reserved = {};
561     }
562   }
563 
564   // Keep heap at HeapBaseMinAddress.
565   if (!reserved.is_reserved()) {
566 
567     // Try to allocate the heap at addresses that allow efficient oop compression.
568     // Different schemes are tried, in order of decreasing optimization potential.
569     //
570     // For this, try_reserve_heap() is called with the desired heap base addresses.
571     // A call into the os layer to allocate at a given address can return memory
572     // at a different address than requested.  Still, this might be memory at a useful
573     // address. try_reserve_heap() always returns this allocated memory, because the
574     // criteria for a good heap are only checked here.
575 
576     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
577     // Give it several tries from top of range to bottom.
578     if (aligned_heap_base_min_address + size <= UnscaledOopHeapMax) {
579 
580       // Calculate the address range within which we try to attach (range of possible start addresses).
581       uintptr_t const highest_start = align_down(UnscaledOopHeapMax - size, attach_point_alignment);
582       uintptr_t const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
583       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT ,
584                                           lowest_start, highest_start);
585       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
586                                    (char*)aligned_heap_base_min_address, (char*)UnscaledOopHeapMax, size, alignment, page_size);
587     }
588 
589     // zerobased: Attempt to allocate in the lower 32G.
590     const uintptr_t zerobased_max = OopEncodingHeapMax;
591 
592     // Give it several tries from top of range to bottom.
593     if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
594         ((!reserved.is_reserved()) ||                            // No previous try succeeded.
595          (reserved.end() > (char*)zerobased_max))) {             // Unscaled delivered an arbitrary address.
596 
597       // Release previous reservation
598       release(reserved);
599 
600       // Calculate the address range within which we try to attach (range of possible start addresses).
601       uintptr_t const highest_start = align_down(zerobased_max - size, attach_point_alignment);
602       // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
603       // so guard the subtraction below against underflow.
604       uintptr_t lowest_start = aligned_heap_base_min_address;
605       if (size < UnscaledOopHeapMax) {
606         lowest_start = MAX2<uintptr_t>(lowest_start, UnscaledOopHeapMax - size);
607       }
608       lowest_start = align_up(lowest_start, attach_point_alignment);
609       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT,
610                                           lowest_start, highest_start);
611       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
612                                    (char*)aligned_heap_base_min_address, (char*)zerobased_max, size, alignment, page_size);
613     }

642     }
643   }
644 
645   // No more reserve attempts
646 
647   if (reserved.is_reserved()) {
648     // Successfully found and reserved memory for the heap.
649 
650     if (reserved.size() > size) {
651       // We reserved heap memory with a noaccess prefix.
652 
653       assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
654       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
655       // if we had to try at an arbitrary address.
656       reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
657       assert(reserved.size() == size, "Prefix should be gone");
658       return ReservedHeapSpace(reserved, noaccess_prefix);
659     }
660 
661     // We reserved heap memory without a noaccess prefix.

662     return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
663   }
664 
665   // Failed
666   return {};
667 }
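
The unscaled and zero-based cases above hinge on two limits of compressed-oop encoding: with a 32-bit narrow oop and the default 8-byte object alignment (shift 3), an unshifted encoding only works if the heap ends below 4 GB, and a zero-based shifted encoding only works below 32 GB. A small arithmetic sketch (hypothetical heap placement):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t G = 1024ull * 1024 * 1024;
      const unsigned shift = 3;                              // log2 of 8-byte object alignment
      const uint64_t unscaled_max  = 4ull * G;               // heap end limit for unscaled oops (~UnscaledOopHeapMax)
      const uint64_t zerobased_max = unscaled_max << shift;  // heap end limit for zero-based oops (~OopEncodingHeapMax)

      const uint64_t heap_base = 2ull * G;                   // hypothetical aligned_heap_base_min_address
      const uint64_t heap_size = 20ull * G;                  // hypothetical heap size
      const uint64_t heap_end  = heap_base + heap_size;      // 22 GB

      printf("unscaled possible:   %d\n", heap_end <= unscaled_max);   // 0: needs a shift
      printf("zero-based possible: %d\n", heap_end <= zerobased_max);  // 1: base can stay zero
      return 0;
    }
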
668 
669 #endif // _LP64
670 
671 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
672   if (UseCompressedOops) {
673 #ifdef _LP64
674     return reserve_compressed_oops_heap(size, alignment, page_size);
675 #endif
676   } else {
677     return reserve_uncompressed_oops_heap(size, alignment, page_size);
678   }
679 }
680 
681 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {

484   uint start = i;
485 
486   // Avoid more steps than requested.
487   i = 0;
488   while (addresses[start+i] != 0) {
489     if (i == HeapSearchSteps) {
490       addresses[start+i] = 0;
491       break;
492     }
493     i++;
494   }
495 
496   return (char**) &addresses[start];
497 }
498 
499 // Create protection page at the beginning of the space.
500 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
501   assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
502   assert(reserved.is_reserved(), "should only be called on a reserved memory area");
503 
504   if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
505     assert((reserved.base() != nullptr), "sanity");
506     if (true
507         WIN64_ONLY(&& !UseLargePages)
508         AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
509       // Protect memory at the base of the allocated region.
510       if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
511         fatal("cannot protect protection page");
512       }
513       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
514                                  PTR_FORMAT " / %zd bytes",
515                                  p2i(reserved.base()),
516                                  noaccess_prefix);
517       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
518     } else {
519       CompressedOops::set_use_implicit_null_checks(false);
520     }
521   }
522 
523   return reserved.last_part(noaccess_prefix);
524 }
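
The protection page matters for CompressedOops::use_implicit_null_checks(): when a non-zero heap base is in use, decoding a null narrow oop yields the base itself, so a field access through it lands inside the protected prefix and faults instead of reading valid memory. A tiny sketch of that arithmetic (all numbers hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t heap_base       = 0x800000000ull;  // hypothetical reserved heap base (32 GB)
      const uint64_t noaccess_prefix = 16 * 1024;       // hypothetical prefix size
      const unsigned shift           = 3;               // 8-byte object alignment

      const uint32_t narrow_null  = 0;                  // a compressed null reference
      const uint64_t decoded      = heap_base + ((uint64_t)narrow_null << shift);
      const uint64_t field_access = decoded + 24;       // load of some small field offset

      // The access falls inside [heap_base, heap_base + noaccess_prefix), which the
      // protection page made inaccessible, so the hardware faults and the VM can
      // report a null dereference instead of doing an explicit null check.
      printf("access faults in the protected prefix: %d\n",
             field_access >= heap_base && field_access < heap_base + noaccess_prefix);
      return 0;
    }
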
525 

531   assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
532 
533   assert(alignment >= os::vm_page_size(), "alignment too small");
534   assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
535   assert(is_power_of_2(alignment), "not a power of 2");
536 
537   // The necessary attach point alignment for generated wish addresses.
538   // This is needed to increase the chance of attaching for mmap and shmat.
539   // AIX is the only platform that uses System V shm for reserving virtual memory.
540   // In this case, the required alignment of the allocated size (64K) and the alignment
541   // of possible start points of the memory region (256M) differ.
542   // This is not reflected by os_allocation_granularity().
543   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
544   const size_t os_attach_point_alignment =
545     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
546     NOT_AIX(os::vm_allocation_granularity());
547 
548   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
549 
550   uintptr_t aligned_heap_base_min_address = align_up(MAX2(HeapBaseMinAddress, alignment), alignment);
551   uintptr_t heap_end_address = aligned_heap_base_min_address + size;
552 
553   bool unscaled  = false;
554   bool zerobased = false;
555   if (!UseCompatibleCompressedOops) { // heap base is not enforced
556     unscaled  = (heap_end_address <= UnscaledOopHeapMax);
557     zerobased = (heap_end_address <= OopEncodingHeapMax);
558   }
559   size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;
560 
561   ReservedSpace reserved{};
562 
563   // Attempt to allocate at the user-given address.
564   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
565     reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, (char*)aligned_heap_base_min_address);
566     if (reserved.base() != (char*)aligned_heap_base_min_address) { // Enforce this exact address.
567       release(reserved);
568       reserved = {};
569     }
570   }
571 
572   // Keep heap at HeapBaseMinAddress.
573   if (!reserved.is_reserved()) {
574 
575     // Try to allocate the heap at addresses that allow efficient oop compression.
576     // Different schemes are tried, in order of decreasing optimization potential.
577     //
578     // For this, try_reserve_heap() is called with the desired heap base addresses.
579     // A call into the os layer to allocate at a given address can return memory
580     // at a different address than requested.  Still, this might be memory at a useful
581     // address. try_reserve_heap() always returns this allocated memory, because the
582     // criteria for a good heap are only checked here.
583 
584     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
585     // Give it several tries from top of range to bottom.
586     if (unscaled) {
587 
588       // Calculate the address range within which we try to attach (range of possible start addresses).
589       uintptr_t const highest_start = align_down(UnscaledOopHeapMax - size, attach_point_alignment);
590       uintptr_t const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
591       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT ,
592                                           lowest_start, highest_start);
593       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
594                                    (char*)aligned_heap_base_min_address, (char*)UnscaledOopHeapMax, size, alignment, page_size);
595     }
596 
597     // zerobased: Attempt to allocate in the lower 32G.
598     const uintptr_t zerobased_max = OopEncodingHeapMax;
599 
600     // Give it several tries from top of range to bottom.
601     if (zerobased &&                                             // Zerobased theoretically possible.
602         ((!reserved.is_reserved()) ||                            // No previous try succeeded.
603          (reserved.end() > (char*)zerobased_max))) {             // Unscaled delivered an arbitrary address.
604 
605       // Release previous reservation
606       release(reserved);
607 
608       // Calculate the address range within which we try to attach (range of possible start addresses).
609       uintptr_t const highest_start = align_down(zerobased_max - size, attach_point_alignment);
610       // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
611       // so guard the subtraction below against underflow.
612       uintptr_t lowest_start = aligned_heap_base_min_address;
613       if (size < UnscaledOopHeapMax) {
614         lowest_start = MAX2<uintptr_t>(lowest_start, UnscaledOopHeapMax - size);
615       }
616       lowest_start = align_up(lowest_start, attach_point_alignment);
617       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT,
618                                           lowest_start, highest_start);
619       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
620                                    (char*)aligned_heap_base_min_address, (char*)zerobased_max, size, alignment, page_size);
621     }

650     }
651   }
652 
653   // No more reserve attempts
654 
655   if (reserved.is_reserved()) {
656     // Successfully found and reserved memory for the heap.
657 
658     if (reserved.size() > size) {
659       // We reserved heap memory with a noaccess prefix.
660 
661       assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
662       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
663       // if we had to try at an arbitrary address.
664       reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
665       assert(reserved.size() == size, "Prefix should be gone");
666       return ReservedHeapSpace(reserved, noaccess_prefix);
667     }
668 
669     // We reserved heap memory without a noaccess prefix.
670     assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
671     return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
672   }
673 
674   // Failed
675   return {};
676 }
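
try_reserve_range() above is driven by wish addresses spaced attach_point_alignment apart, where that alignment is the lcm of the heap alignment and the platform's attach granularity. A minimal sketch of generating such candidates top-down (hypothetical values; this is not the HotSpot helper):

    #include <cstdint>
    #include <cstdio>
    #include <numeric>   // std::lcm

    static uint64_t align_down_to(uint64_t v, uint64_t a) { return v - (v % a); }
    static uint64_t align_up_to(uint64_t v, uint64_t a)   { return align_down_to(v + a - 1, a); }

    int main() {
      const uint64_t M = 1024 * 1024, G = 1024 * M;

      const uint64_t heap_alignment  = 2 * M;     // hypothetical heap alignment
      const uint64_t os_attach_align = 256 * M;   // hypothetical attach granularity (the AIX shmat case)
      const uint64_t attach_align    = std::lcm(heap_alignment, os_attach_align);

      const uint64_t size      = 3 * G;           // hypothetical heap size
      const uint64_t range_min = 2 * G;           // e.g. the aligned HeapBaseMinAddress
      const uint64_t range_max = 32 * G;          // e.g. zerobased_max

      const uint64_t highest_start = align_down_to(range_max - size, attach_align);
      const uint64_t lowest_start  = align_up_to(range_min, attach_align);

      // Walk top-down for a bounded number of steps, as HeapSearchSteps bounds the real search.
      const unsigned max_steps = 4;
      unsigned steps = 0;
      for (uint64_t start = highest_start; start >= lowest_start && steps < max_steps;
           start -= attach_align, steps++) {
        printf("wish address: 0x%llx\n", (unsigned long long)start);
      }
      return 0;
    }
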
677 
678 #endif // _LP64
679 
680 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
681   if (UseCompressedOops) {
682 #ifdef _LP64
683     return reserve_compressed_oops_heap(size, alignment, page_size);
684 #endif
685   } else {
686     return reserve_uncompressed_oops_heap(size, alignment, page_size);
687   }
688 }
689 
690 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {