486 uint start = i;
487
488 // Avoid more steps than requested: cut the candidate list off after at most HeapSearchSteps entries.
489 i = 0;
490 while (addresses[start+i] != 0) {
491 if (i == HeapSearchSteps) {
492 addresses[start+i] = 0;
493 break;
494 }
495 i++;
496 }
497
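// Return the candidate addresses starting at index 'start' (null-terminated, at most HeapSearchSteps entries).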
498 return (char**) &addresses[start];
499 }
500
501 // Create protection page at the beginning of the space.
502 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
503 assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
504 assert(reserved.is_reserved(), "should only be called on a reserved memory area");
505
506 if (reserved.end() > (char *)OopEncodingHeapMax) {
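// On some configurations the base of the reservation cannot be protected (e.g. Windows large pages, AIX System V shm); in that case fall back to explicit null checks.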
507 if (true
508 WIN64_ONLY(&& !UseLargePages)
509 AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
510 // Protect memory at the base of the allocated region.
511 if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
512 fatal("cannot protect protection page");
513 }
514 log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
515 PTR_FORMAT " / %zd bytes",
516 p2i(reserved.base()),
517 noaccess_prefix);
518 assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
519 } else {
520 CompressedOops::set_use_implicit_null_checks(false);
521 }
522 }
523
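// Strip the (possibly protected) prefix from the reservation; only the remaining part is used as the heap.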
524 return reserved.last_part(noaccess_prefix);
525 }
526
532 assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
533
534 assert(alignment >= os::vm_page_size(), "alignment too small");
535 assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
536 assert(is_power_of_2(alignment), "not a power of 2");
537
538 // The necessary attach point alignment for generated wish addresses.
539 // This is needed to increase the chance of attaching for mmap and shmat.
540 // AIX is the only platform that uses System V shm for reserving virtual memory.
541 // In this case, the required alignment of the allocated size (64K) and the alignment
542 // of possible start points of the memory region (256M) differ.
543 // This is not reflected by os::vm_allocation_granularity().
544 // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
545 const size_t os_attach_point_alignment =
546 AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
547 NOT_AIX(os::vm_allocation_granularity());
548
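// Wish addresses must satisfy both the requested heap alignment and the OS attach point alignment, hence the least common multiple.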
549 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
550
551 char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
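// A noaccess prefix is only needed if a heap starting at the minimum base address would end above OopEncodingHeapMax, i.e. could not be zero-based; only then does oop decoding use a non-null base.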
552 size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
553 noaccess_prefix_size : 0;
554
555 ReservedSpace reserved{};
556
557 // Attempt to alloc at user-given address.
558 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
559 reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
560 if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
561 release(reserved);
562 reserved = {};
563 }
564 }
565
566 // The heap could not be reserved at HeapBaseMinAddress (or no attempt was made); try other placements.
567 if (!reserved.is_reserved()) {
568
569 // Try to allocate the heap at addresses that allow efficient oop compression.
570 // Different schemes are tried, in order of decreasing optimization potential.
571 //
572 // For this, the try_reserve_range() / try_reserve_memory() helpers are called with
573 // the desired heap base addresses. A call into the OS layer to allocate at a given
574 // address can return memory at a different address than requested. Still, this might
575 // be memory at a useful address. The helpers always return whatever memory was
576 // allocated, because the criteria for a good heap placement are only checked here.
577
578 // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
579 // Give it several tries from top of range to bottom.
580 if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
581
582 // Calculate the address range within which we try to attach (range of possible start addresses).
583 char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
584 char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
585 reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
586 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
587 }
588
589 // zerobased: Attempt to allocate in the lower 32G.
590 char *zerobased_max = (char *)OopEncodingHeapMax;
591
592 // Give it several tries from top of range to bottom.
593 if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretically possible.
594 ((!reserved.is_reserved()) || // No previous try succeeded.
595 (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.
596
597 // Release previous reservation
598 release(reserved);
599
600 // Calculate the address range within which we try to attach (range of possible start addresses).
601 char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
602 // Note that size is not guaranteed to be less than UnscaledOopHeapMax,
603 // so the unsigned subtraction below may wrap around.
604 char *lowest_start = aligned_heap_base_min_address;
605 uint64_t unscaled_end = UnscaledOopHeapMax - size;
606 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
607 lowest_start = MAX2(lowest_start, (char*)unscaled_end);
608 }
609 lowest_start = align_up(lowest_start, attach_point_alignment);
610 reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
611 aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
612 }
613
614 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
615 // implement null checks.
641 }
642 }
643
644 // No more reserve attempts
645
646 if (reserved.is_reserved()) {
647 // Successfully found and reserved memory for the heap.
648
649 if (reserved.size() > size) {
650 // We reserved heap memory with a noaccess prefix.
651
652 assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
653 // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
654 // if we had to try at an arbitrary address.
655 reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
656 assert(reserved.size() == size, "Prefix should be gone");
657 return ReservedHeapSpace(reserved, noaccess_prefix);
658 }
659
660 // We reserved heap memory without a noaccess prefix.
661 return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
662 }
663
664 // Failed
665 return {};
666 }
667
668 #endif // _LP64
669
670 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
671 if (UseCompressedOops) {
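// UseCompressedOops can only be true on 64-bit builds, so this branch is unreachable when _LP64 is not defined.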
672 #ifdef _LP64
673 return reserve_compressed_oops_heap(size, alignment, page_size);
674 #endif
675 } else {
676 return reserve_uncompressed_oops_heap(size, alignment, page_size);
677 }
678 }
679
680 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
486 uint start = i;
487
488 // Avoid more steps than requested: cut the candidate list off after at most HeapSearchSteps entries.
489 i = 0;
490 while (addresses[start+i] != 0) {
491 if (i == HeapSearchSteps) {
492 addresses[start+i] = 0;
493 break;
494 }
495 i++;
496 }
497
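// Return the candidate addresses starting at index 'start' (null-terminated, at most HeapSearchSteps entries).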
498 return (char**) &addresses[start];
499 }
500
501 // Create protection page at the beginning of the space.
502 static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
503 assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
504 assert(reserved.is_reserved(), "should only be called on a reserved memory area");
505
506 if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
507 assert((reserved.base() != nullptr), "sanity");
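// On some configurations the base of the reservation cannot be protected (e.g. Windows large pages, AIX System V shm); in that case fall back to explicit null checks.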
508 if (true
509 WIN64_ONLY(&& !UseLargePages)
510 AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
511 // Protect memory at the base of the allocated region.
512 if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
513 fatal("cannot protect protection page");
514 }
515 log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
516 PTR_FORMAT " / %zd bytes",
517 p2i(reserved.base()),
518 noaccess_prefix);
519 assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
520 } else {
521 CompressedOops::set_use_implicit_null_checks(false);
522 }
523 }
524
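// Strip the (possibly protected) prefix from the reservation; only the remaining part is used as the heap.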
525 return reserved.last_part(noaccess_prefix);
526 }
527
533 assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
534
535 assert(alignment >= os::vm_page_size(), "alignment too small");
536 assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
537 assert(is_power_of_2(alignment), "not a power of 2");
538
539 // The necessary attach point alignment for generated wish addresses.
540 // This is needed to increase the chance of attaching for mmap and shmat.
541 // AIX is the only platform that uses System V shm for reserving virtual memory.
542 // In this case, the required alignment of the allocated size (64K) and the alignment
543 // of possible start points of the memory region (256M) differ.
544 // This is not reflected by os::vm_allocation_granularity().
545 // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
546 const size_t os_attach_point_alignment =
547 AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
548 NOT_AIX(os::vm_allocation_granularity());
549
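// Wish addresses must satisfy both the requested heap alignment and the OS attach point alignment, hence the least common multiple.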
550 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
551
552 char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
553 char* heap_end_address = aligned_heap_base_min_address + size;
554
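// With UseCompatibleCompressedOops the heap base is enforced: the unscaled and zerobased
// placements below are not attempted and a noaccess prefix is always reserved.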
555 bool unscaled = false;
556 bool zerobased = false;
557 if (!UseCompatibleCompressedOops) { // heap base is not enforced
558 unscaled = (heap_end_address <= (char*)UnscaledOopHeapMax);
559 zerobased = (heap_end_address <= (char*)OopEncodingHeapMax);
560 }
561 size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;
562
563 ReservedSpace reserved{};
564
565 // Attempt to alloc at user-given address.
566 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
567 reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
568 if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
569 release(reserved);
570 reserved = {};
571 }
572 }
573
574 // The heap could not be reserved at HeapBaseMinAddress (or no attempt was made); try other placements.
575 if (!reserved.is_reserved()) {
576
577 // Try to allocate the heap at addresses that allow efficient oop compression.
578 // Different schemes are tried, in order of decreasing optimization potential.
579 //
580 // For this, the try_reserve_range() / try_reserve_memory() helpers are called with
581 // the desired heap base addresses. A call into the OS layer to allocate at a given
582 // address can return memory at a different address than requested. Still, this might
583 // be memory at a useful address. The helpers always return whatever memory was
584 // allocated, because the criteria for a good heap placement are only checked here.
585
586 // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
587 // Give it several tries from top of range to bottom.
588 if (unscaled) {
589
590 // Calculate the address range within which we try to attach (range of possible start addresses).
591 char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
592 char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
593 reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
594 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
595 }
596
597 // zerobased: Attempt to allocate in the lower 32G.
598 char *zerobased_max = (char *)OopEncodingHeapMax;
599
600 // Give it several tries from top of range to bottom.
601 if (zerobased && // Zerobased theoretically possible.
602 ((!reserved.is_reserved()) || // No previous try succeeded.
603 (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.
604
605 // Release previous reservation
606 release(reserved);
607
608 // Calculate the address range within which we try to attach (range of possible start addresses).
609 char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
610 // Note that size is not guaranteed to be less than UnscaledOopHeapMax,
611 // so the unsigned subtraction below may wrap around.
612 char *lowest_start = aligned_heap_base_min_address;
613 uint64_t unscaled_end = UnscaledOopHeapMax - size;
614 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
615 lowest_start = MAX2(lowest_start, (char*)unscaled_end);
616 }
617 lowest_start = align_up(lowest_start, attach_point_alignment);
618 reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
619 aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
620 }
621
622 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
623 // implement null checks.
649 }
650 }
651
652 // No more reserve attempts
653
654 if (reserved.is_reserved()) {
655 // Successfully found and reserved memory for the heap.
656
657 if (reserved.size() > size) {
658 // We reserved heap memory with a noaccess prefix.
659
660 assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
661 // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
662 // if we had to try at an arbitrary address.
663 reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
664 assert(reserved.size() == size, "Prefix should be gone");
665 return ReservedHeapSpace(reserved, noaccess_prefix);
666 }
667
668 // We reserved heap memory without a noaccess prefix.
669 assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
670 return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
671 }
672
673 // Failed
674 return {};
675 }
676
677 #endif // _LP64
678
679 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
680 if (UseCompressedOops) {
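// UseCompressedOops can only be true on 64-bit builds, so this branch is unreachable when _LP64 is not defined.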
681 #ifdef _LP64
682 return reserve_compressed_oops_heap(size, alignment, page_size);
683 #endif
684 } else {
685 return reserve_uncompressed_oops_heap(size, alignment, page_size);
686 }
687 }
688
689 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {