514 "size not aligned to os::vm_allocation_granularity()");
515 assert((alignment & (granularity - 1)) == 0,
516 "alignment not aligned to os::vm_allocation_granularity()");
517 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
518 "not a power of 2");
519
520 // The necessary attach point alignment for generated wish addresses.
521 // This is needed to increase the chance of attaching for mmap and shmat.
522 // AIX is the only platform that uses System V shm for reserving virtual memory.
523 // In this case, the required alignment of the allocated size (64K) and the alignment
524 // of possible start points of the memory region (256M) differ.
525 // This is not reflected by os::vm_allocation_granularity().
526 // The logic here mirrors the one in pd_reserve_memory() in os_aix.cpp.
527 const size_t os_attach_point_alignment =
528 AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
529 NOT_AIX(os::vm_allocation_granularity());
530
531 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
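// E.g. a 2M heap alignment with 64K allocation granularity yields 2M; on AIX with
// 256M attach points it becomes lcm(2M, 256M) = 256M (illustrative values only).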
532
533 char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
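// If a heap anchored at HeapBaseMinAddress would reach past OopEncodingHeapMax, only
// heap-based compressed oops (base != 0) remain possible; those need a protected
// noaccess prefix so that decoding a compressed null (base + 0) traps.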
534 size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
535 noaccess_prefix_size(alignment) : 0;
536
537 // Attempt to alloc at user-given address.
538 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
539 try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
540 if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
541 release();
542 }
543 }
544
545 // Keep heap at HeapBaseMinAddress.
546 if (_base == nullptr) {
547
548 // Try to allocate the heap at addresses that allow efficient oop compression.
549 // Different schemes are tried, in order of decreasing optimization potential.
550 //
551 // For this, try_reserve_heap() is called with the desired heap base addresses.
552 // A call into the os layer to allocate at a given address can return memory
553 // at a different address than requested. Still, this might be memory at a useful
554 // address. try_reserve_heap() always returns whatever memory it allocated, since
555 // the criteria for a good heap are only checked here.
556
557 // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
558 // Give it several tries from top of range to bottom.
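// Unscaled mode: a narrow oop is the raw address (no base, no shift), so the whole
// heap must end below UnscaledOopHeapMax (4G).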
559 if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
560
561 // Calc address range within which we try to attach (range of possible start addresses).
562 char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
563 char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
564 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
565 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
566 }
567
568 // zerobased: Attempt to allocate in the lower 32G.
569 char *zerobased_max = (char *)OopEncodingHeapMax;
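// Zerobased mode: base == nullptr and a narrow oop is decoded as (narrowOop << LogMinObjAlignmentInBytes),
// so the heap must end below OopEncodingHeapMax (32G with the default 8-byte object alignment).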
570
571 // Give it several tries from top of range to bottom.
572 if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
573 ((_base == nullptr) || // No previous try succeeded.
574 (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
575
576 // Calc address range within which we try to attach (range of possible start addresses).
577 char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
578 // Need to be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
579 // so the subtraction below may wrap around.
580 char *lowest_start = aligned_heap_base_min_address;
581 uint64_t unscaled_end = UnscaledOopHeapMax - size;
582 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
583 lowest_start = MAX2(lowest_start, (char*)unscaled_end);
584 }
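// Start addresses below UnscaledOopHeapMax - size would keep the heap entirely under
// UnscaledOopHeapMax and were already covered by the unscaled attempt above, so they
// are not retried here.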
585 lowest_start = align_up(lowest_start, attach_point_alignment);
586 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
587 aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
588 }
589
590 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
591 // implement null checks.
592 noaccess_prefix = noaccess_prefix_size(alignment);
620
621 if (heap_allocation_directory != nullptr) {
622 _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
623 if (_fd_for_heap == -1) {
624 vm_exit_during_initialization(
625 err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
626 }
627 // When there is a backing file directory for this space, whether large pages
628 // are allocated is up to the filesystem of the backing file.
629 // If requested, let the user know that explicit large pages can't be used.
630 if (use_explicit_large_pages(page_size) && large_pages_requested()) {
631 log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
632 }
633 }
634
635 // Heap size should be aligned to alignment, too.
636 guarantee(is_aligned(size, alignment), "set by caller");
637
638 if (UseCompressedOops) {
639 initialize_compressed_heap(size, alignment, page_size);
640 if (_size > size) {
641 // We allocated heap with noaccess prefix.
642 // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
643 // if we had to try at an arbitrary address.
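// _size > size means the reservation grew by the noaccess prefix; establish_noaccess_prefix()
// protects that prefix and excludes it from the usable heap.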
644 establish_noaccess_prefix();
645 }
646 } else {
647 initialize(size, alignment, page_size, nullptr, false);
648 }
649
650 assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
651 "area must be distinguishable from marks for mark-sweep");
652 assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
653 "area must be distinguishable from marks for mark-sweep");
654
655 if (base() != nullptr) {
656 MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
657 }
658
659 if (_fd_for_heap != -1) {
660 ::close(_fd_for_heap);
|
514 "size not aligned to os::vm_allocation_granularity()");
515 assert((alignment & (granularity - 1)) == 0,
516 "alignment not aligned to os::vm_allocation_granularity()");
517 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
518 "not a power of 2");
519
520 // The necessary attach point alignment for generated wish addresses.
521 // This is needed to increase the chance of attaching for mmap and shmat.
522 // AIX is the only platform that uses System V shm for reserving virtual memory.
523 // In this case, the required alignment of the allocated size (64K) and the alignment
524 // of possible start points of the memory region (256M) differ.
525 // This is not reflected by os::vm_allocation_granularity().
526 // The logic here mirrors the one in pd_reserve_memory() in os_aix.cpp.
527 const size_t os_attach_point_alignment =
528 AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
529 NOT_AIX(os::vm_allocation_granularity());
530
531 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
532
533 char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
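// With UseCompatibleCompressedOops the prefix is reserved unconditionally on LP64,
// presumably so the resulting CompressedOops layout does not depend on where the
// reservation happens to land.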
534 size_t noaccess_prefix = (((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) LP64_ONLY(|| UseCompatibleCompressedOops)) ?
535 noaccess_prefix_size(alignment) : 0;
536
537 // Attempt to alloc at user-given address.
538 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) LP64_ONLY(|| UseCompatibleCompressedOops)) {
539 try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
540 if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
541 release();
542 }
543 }
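// With UseCompatibleCompressedOops the heap is likewise pinned at the (aligned)
// HeapBaseMinAddress: the reservation is released unless that exact address was obtained.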
544
545 // Keep heap at HeapBaseMinAddress.
546 if (_base == nullptr) {
547
548 // Try to allocate the heap at addresses that allow efficient oop compression.
549 // Different schemes are tried, in order of decreasing optimization potential.
550 //
551 // For this, try_reserve_heap() is called with the desired heap base addresses.
552 // A call into the os layer to allocate at a given address can return memory
553 // at a different address than requested. Still, this might be memory at a useful
554 // address. try_reserve_heap() always returns whatever memory it allocated, since
555 // the criteria for a good heap are only checked here.
556
557 // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
558 // Give it several tries from top of range to bottom.
559 if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax LP64_ONLY(&& !UseCompatibleCompressedOops)) {
560
561 // Calc address range within which we try to attach (range of possible start addresses).
562 char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
563 char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
564 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
565 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
566 }
567
568 // zerobased: Attempt to allocate in the lower 32G.
569 char *zerobased_max = (char *)OopEncodingHeapMax;
570
571 // Give it several tries from top of range to bottom.
572 if (LP64_ONLY(!UseCompatibleCompressedOops &&)
573 aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
574 ((_base == nullptr) || // No previous try succeeded.
575 (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
576
577 // Calc address range within which we try to attach (range of possible start addresses).
578 char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
579 // Need to be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
580 // so the subtraction below may wrap around.
581 char *lowest_start = aligned_heap_base_min_address;
582 uint64_t unscaled_end = UnscaledOopHeapMax - size;
583 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
584 lowest_start = MAX2(lowest_start, (char*)unscaled_end);
585 }
586 lowest_start = align_up(lowest_start, attach_point_alignment);
587 try_reserve_range(highest_start, lowest_start, attach_point_alignment,
588 aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
589 }
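// Under UseCompatibleCompressedOops both placement attempts above (unscaled and
// zerobased) are skipped, so the reservation falls through to the base != 0 path below.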
590
591 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
592 // implement null checks.
593 noaccess_prefix = noaccess_prefix_size(alignment);
621
622 if (heap_allocation_directory != nullptr) {
623 _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
624 if (_fd_for_heap == -1) {
625 vm_exit_during_initialization(
626 err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
627 }
628 // When there is a backing file directory for this space, whether large pages
629 // are allocated is up to the filesystem of the backing file.
630 // If requested, let the user know that explicit large pages can't be used.
631 if (use_explicit_large_pages(page_size) && large_pages_requested()) {
632 log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
633 }
634 }
635
636 // Heap size should be aligned to alignment, too.
637 guarantee(is_aligned(size, alignment), "set by caller");
638
639 if (UseCompressedOops) {
640 initialize_compressed_heap(size, alignment, page_size);
641 if (_size > size LP64_ONLY(|| UseCompatibleCompressedOops)) {
642 // We allocated heap with noaccess prefix.
643 // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
644 // if we had to try at an arbitrary address.
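// Under UseCompatibleCompressedOops the noaccess prefix was always reserved above,
// so it is established here unconditionally as well.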
645 establish_noaccess_prefix();
646 }
647 } else {
648 initialize(size, alignment, page_size, nullptr, false);
649 }
650
651 assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
652 "area must be distinguishable from marks for mark-sweep");
653 assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
654 "area must be distinguishable from marks for mark-sweep");
655
656 if (base() != nullptr) {
657 MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
658 }
659
660 if (_fd_for_heap != -1) {
661 ::close(_fd_for_heap);
|