< prev index next >

src/hotspot/share/memory/memoryReserver.cpp

Print this page

471   uint start = i;
472 
473   // Avoid more steps than requested.
474   i = 0;
475   while (addresses[start+i] != 0) {
476     if (i == HeapSearchSteps) {
477       addresses[start+i] = 0;
478       break;
479     }
480     i++;
481   }
482 
483   return (char**) &addresses[start];
484 }
485 
486 // Create protection page at the beginning of the space.
// Carves `noaccess_prefix` bytes off the front of `reserved` and returns the
// remainder, which is the usable heap.  When the reservation ends above
// OopEncodingHeapMax (the heap cannot be addressed zero-based, so compressed
// oops need a non-null base), the prefix is additionally made inaccessible so
// implicit null checks remain valid for heap-based compressed oops; where
// protecting it is not possible, implicit null checks are switched off instead.
487 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
488   assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
489   assert(reserved.is_reserved(), "should only be called on a reserved memory area");
490 
491   if (reserved.end() > (char *)OopEncodingHeapMax) {

    // The platform-only macros expand to nothing elsewhere, leaving
    // "if (true)": protection is attempted unconditionally except on Win64
    // with large pages and on AIX configurations without 64K mmap pages or
    // 4K base pages — presumably because a sub-range of such pages cannot be
    // protected there; TODO confirm against the platform os layers.
492     if (true
493         WIN64_ONLY(&& !UseLargePages)
494         AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
495       // Protect memory at the base of the allocated region.
496       if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
497         fatal("cannot protect protection page");
498       }
499       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
500                                  PTR_FORMAT " / %zd bytes",
501                                  p2i(reserved.base()),
502                                  noaccess_prefix);
503       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
504     } else {
      // No protection page possible on this configuration: fall back to
      // explicit null checks for compressed oops.
505       CompressedOops::set_use_implicit_null_checks(false);
506     }
507   }
508 
    // The usable heap is everything after the (possibly protected) prefix.
509   return reserved.last_part(noaccess_prefix);
510 }
511 

517   assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
518 
519   assert(alignment >= os::vm_page_size(), "alignment too small");
520   assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
521   assert(is_power_of_2(alignment), "not a power of 2");
522 
523   // The necessary attach point alignment for generated wish addresses.
524   // This is needed to increase the chance of attaching for mmap and shmat.
525   // AIX is the only platform that uses System V shm for reserving virtual memory.
526   // In this case, the required alignment of the allocated size (64K) and the alignment
527   // of possible start points of the memory region (256M) differ.
528   // This is not reflected by os_allocation_granularity().
529   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
530   const size_t os_attach_point_alignment =
531     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
532     NOT_AIX(os::vm_allocation_granularity());
533 
534   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
535 
536   uintptr_t aligned_heap_base_min_address = align_up(MAX2(HeapBaseMinAddress, alignment), alignment);
537   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > OopEncodingHeapMax) ?
538     noaccess_prefix_size : 0;







539 
540   ReservedSpace reserved{};
541 
542   // Attempt to alloc at user-given address.
543   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
544     reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, (char*)aligned_heap_base_min_address);
545     if (reserved.base() != (char*)aligned_heap_base_min_address) { // Enforce this exact address.
546       release(reserved);
547       reserved = {};
548     }
549   }
550 
551   // Keep heap at HeapBaseMinAddress.
552   if (!reserved.is_reserved()) {
553 
554     // Try to allocate the heap at addresses that allow efficient oop compression.
555     // Different schemes are tried, in order of decreasing optimization potential.
556     //
557     // For this, try_reserve_heap() is called with the desired heap base addresses.
558     // A call into the os layer to allocate at a given address can return memory
559     // at a different address than requested.  Still, this might be memory at a useful
560     // address. try_reserve_heap() always returns this allocated memory, as only here
561     // the criteria for a good heap are checked.
562 
563     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
564     // Give it several tries from top of range to bottom.
565     if (aligned_heap_base_min_address + size <= UnscaledOopHeapMax) {
566 
567       // Calc address range within we try to attach (range of possible start addresses).
568       uintptr_t const highest_start = align_down(UnscaledOopHeapMax - size, attach_point_alignment);
569       uintptr_t const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
570       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT ,
571                                           lowest_start, highest_start);
572       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
573                                    (char*)aligned_heap_base_min_address, (char*)UnscaledOopHeapMax, size, alignment, page_size);
574     }
575 
576     // zerobased: Attempt to allocate in the lower 32G.
577     const uintptr_t zerobased_max = OopEncodingHeapMax;
578 
579     // Give it several tries from top of range to bottom.
580     if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
581         ((!reserved.is_reserved()) ||                            // No previous try succeeded.
582          (reserved.end() > (char*)zerobased_max))) {             // Unscaled delivered an arbitrary address.
583 
584       // Release previous reservation
585       release(reserved);
586 
587       // Calc address range within we try to attach (range of possible start addresses).
588       uintptr_t const highest_start = align_down(zerobased_max - size, attach_point_alignment);
589       // Need to be careful about size being guaranteed to be less
590       // than UnscaledOopHeapMax due to type constraints.
591       uintptr_t lowest_start = aligned_heap_base_min_address;
592       if (size < UnscaledOopHeapMax) {
593         lowest_start = MAX2<uintptr_t>(lowest_start, UnscaledOopHeapMax - size);
594       }
595       lowest_start = align_up(lowest_start, attach_point_alignment);
596       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT,
597                                           lowest_start, highest_start);
598       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
599                                    (char*)aligned_heap_base_min_address, (char*)zerobased_max, size, alignment, page_size);
600     }

629     }
630   }
631 
632   // No more reserve attempts
633 
634   if (reserved.is_reserved()) {
635     // Successfully found and reserved memory for the heap.
636 
637     if (reserved.size() > size) {
638       // We reserved heap memory with a noaccess prefix.
639 
640       assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
641       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
642       // if we had to try at arbitrary address.
643       reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
644       assert(reserved.size() == size, "Prefix should be gone");
645       return ReservedHeapSpace(reserved, noaccess_prefix);
646     }
647 
648     // We reserved heap memory without a noaccess prefix.

649     return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
650   }
651 
652   // Failed
653   return {};
654 }
655 
656 #endif // _LP64
657 
658 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
// Dispatch: use the compressed-oops-aware reservation path on _LP64 builds,
// otherwise reserve a plain (uncompressed oops) heap.
659   if (UseCompressedOops) {
660 #ifdef _LP64
661     return reserve_compressed_oops_heap(size, alignment, page_size);
662 #endif
// NOTE(review): on a non-_LP64 build with UseCompressedOops set, control falls
// off the end of this branch with no return value — this relies on
// UseCompressedOops never being true on 32-bit; confirm it is forced off there.
663   } else {
664     return reserve_uncompressed_oops_heap(size, alignment, page_size);
665   }
666 }
667 
668 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {

471   uint start = i;
472 
473   // Avoid more steps than requested.
474   i = 0;
475   while (addresses[start+i] != 0) {
476     if (i == HeapSearchSteps) {
477       addresses[start+i] = 0;
478       break;
479     }
480     i++;
481   }
482 
483   return (char**) &addresses[start];
484 }
485 
486 // Create protection page at the beginning of the space.
// Carves `noaccess_prefix` bytes off the front of `reserved` and returns the
// remainder, which is the usable heap.  The prefix is made inaccessible when
// the reservation ends above OopEncodingHeapMax (the heap cannot be addressed
// zero-based, so compressed oops need a non-null base) — or unconditionally
// under UseCompatibleCompressedOops, which forces the heap-based layout even
// for reservations that would otherwise allow zero-based addressing.  Where
// protecting the prefix is not possible, implicit null checks are switched
// off instead.
487 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
488   assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
489   assert(reserved.is_reserved(), "should only be called on a reserved memory area");
490 
491   if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
492     assert((reserved.base() != nullptr), "sanity");
    // The platform-only macros expand to nothing elsewhere, leaving
    // "if (true)": protection is attempted unconditionally except on Win64
    // with large pages and on AIX configurations without 64K mmap pages or
    // 4K base pages — presumably because a sub-range of such pages cannot be
    // protected there; TODO confirm against the platform os layers.
493     if (true
494         WIN64_ONLY(&& !UseLargePages)
495         AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
496       // Protect memory at the base of the allocated region.
497       if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
498         fatal("cannot protect protection page");
499       }
500       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
501                                  PTR_FORMAT " / %zd bytes",
502                                  p2i(reserved.base()),
503                                  noaccess_prefix);
504       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
505     } else {
      // No protection page possible on this configuration: fall back to
      // explicit null checks for compressed oops.
506       CompressedOops::set_use_implicit_null_checks(false);
507     }
508   }
509 
    // The usable heap is everything after the (possibly protected) prefix.
510   return reserved.last_part(noaccess_prefix);
511 }
512 

518   assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
519 
520   assert(alignment >= os::vm_page_size(), "alignment too small");
521   assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
522   assert(is_power_of_2(alignment), "not a power of 2");
523 
524   // The necessary attach point alignment for generated wish addresses.
525   // This is needed to increase the chance of attaching for mmap and shmat.
526   // AIX is the only platform that uses System V shm for reserving virtual memory.
527   // In this case, the required alignment of the allocated size (64K) and the alignment
528   // of possible start points of the memory region (256M) differ.
529   // This is not reflected by os_allocation_granularity().
530   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
531   const size_t os_attach_point_alignment =
532     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
533     NOT_AIX(os::vm_allocation_granularity());
534 
535   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
536 
537   uintptr_t aligned_heap_base_min_address = align_up(MAX2(HeapBaseMinAddress, alignment), alignment);
538   uintptr_t heap_end_address = aligned_heap_base_min_address + size;
539 
540   bool unscaled  = false;
541   bool zerobased = false;
542   if (!UseCompatibleCompressedOops) { // heap base is not enforced
543     unscaled  = (heap_end_address <= UnscaledOopHeapMax);
544     zerobased = (heap_end_address <= OopEncodingHeapMax);
545   }
546   size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;
547 
548   ReservedSpace reserved{};
549 
550   // Attempt to alloc at user-given address.
551   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
552     reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, (char*)aligned_heap_base_min_address);
553     if (reserved.base() != (char*)aligned_heap_base_min_address) { // Enforce this exact address.
554       release(reserved);
555       reserved = {};
556     }
557   }
558 
559   // Keep heap at HeapBaseMinAddress.
560   if (!reserved.is_reserved()) {
561 
562     // Try to allocate the heap at addresses that allow efficient oop compression.
563     // Different schemes are tried, in order of decreasing optimization potential.
564     //
565     // For this, try_reserve_heap() is called with the desired heap base addresses.
566     // A call into the os layer to allocate at a given address can return memory
567     // at a different address than requested.  Still, this might be memory at a useful
568     // address. try_reserve_heap() always returns this allocated memory, as only here
569     // the criteria for a good heap are checked.
570 
571     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
572     // Give it several tries from top of range to bottom.
573     if (unscaled) {
574 
575       // Calc address range within we try to attach (range of possible start addresses).
576       uintptr_t const highest_start = align_down(UnscaledOopHeapMax - size, attach_point_alignment);
577       uintptr_t const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
578       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT ,
579                                           lowest_start, highest_start);
580       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
581                                    (char*)aligned_heap_base_min_address, (char*)UnscaledOopHeapMax, size, alignment, page_size);
582     }
583 
584     // zerobased: Attempt to allocate in the lower 32G.
585     const uintptr_t zerobased_max = OopEncodingHeapMax;
586 
587     // Give it several tries from top of range to bottom.
588     if (zerobased &&                                             // Zerobased theoretical possible.
589         ((!reserved.is_reserved()) ||                            // No previous try succeeded.
590          (reserved.end() > (char*)zerobased_max))) {             // Unscaled delivered an arbitrary address.
591 
592       // Release previous reservation
593       release(reserved);
594 
595       // Calc address range within we try to attach (range of possible start addresses).
596       uintptr_t const highest_start = align_down(zerobased_max - size, attach_point_alignment);
597       // Need to be careful about size being guaranteed to be less
598       // than UnscaledOopHeapMax due to type constraints.
599       uintptr_t lowest_start = aligned_heap_base_min_address;
600       if (size < UnscaledOopHeapMax) {
601         lowest_start = MAX2<uintptr_t>(lowest_start, UnscaledOopHeapMax - size);
602       }
603       lowest_start = align_up(lowest_start, attach_point_alignment);
604       assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT,
605                                           lowest_start, highest_start);
606       reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
607                                    (char*)aligned_heap_base_min_address, (char*)zerobased_max, size, alignment, page_size);
608     }

637     }
638   }
639 
640   // No more reserve attempts
641 
642   if (reserved.is_reserved()) {
643     // Successfully found and reserved memory for the heap.
644 
645     if (reserved.size() > size) {
646       // We reserved heap memory with a noaccess prefix.
647 
648       assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
649       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
650       // if we had to try at arbitrary address.
651       reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
652       assert(reserved.size() == size, "Prefix should be gone");
653       return ReservedHeapSpace(reserved, noaccess_prefix);
654     }
655 
656     // We reserved heap memory without a noaccess prefix.
657     assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
658     return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
659   }
660 
661   // Failed
662   return {};
663 }
664 
665 #endif // _LP64
666 
667 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
// Dispatch: use the compressed-oops-aware reservation path on _LP64 builds,
// otherwise reserve a plain (uncompressed oops) heap.
668   if (UseCompressedOops) {
669 #ifdef _LP64
670     return reserve_compressed_oops_heap(size, alignment, page_size);
671 #endif
// NOTE(review): on a non-_LP64 build with UseCompressedOops set, control falls
// off the end of this branch with no return value — this relies on
// UseCompressedOops never being true on 32-bit; confirm it is forced off there.
672   } else {
673     return reserve_uncompressed_oops_heap(size, alignment, page_size);
674   }
675 }
676 
677 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
< prev index next >