  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

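  // The zero written above terminates the list, so at most HeapSearchSteps wish
  // addresses remain visible from this starting point.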
  return (char**) &addresses[start];
}

// Create protection page at the beginning of the space.
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  if (reserved.end() > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / %zd bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

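  // The usable heap starts right after the protected page, while the compressed-oops
  // base stays at reserved.base(); an access through a null narrow oop therefore
  // resolves into the protected prefix and can be caught as an implicit null check.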
  return reserved.last_part(noaccess_prefix);
}

// ... (start of reserve_compressed_oops_heap elided)

  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os::vm_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp.
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
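  // Example: a 2M heap alignment combined with a 64K allocation granularity gives an
  // attach point alignment of lcm(2M, 64K) = 2M.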

  char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
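  // A noaccess prefix is only needed if the heap may extend beyond OopEncodingHeapMax:
  // with a zero-based encoding, a null narrow oop decodes to address 0, which is never
  // mapped, so no extra protection page is required.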
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
                           noaccess_prefix_size : 0;

  ReservedSpace reserved{};

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;
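    // UnscaledOopHeapMax (4 GB) is the limit for encoding oops without base or shift;
    // OopEncodingHeapMax extends that by the object alignment shift (32 GB with the
    // default 8-byte object alignment) for zero-based, shifted encoding.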

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&  // Zerobased theoretically possible.
        ((!reserved.is_reserved()) ||                             // No previous try succeeded.
         (reserved.end() > zerobased_max))) {                     // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calc address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // size is not guaranteed to be smaller than UnscaledOopHeapMax due to its type,
      // so the subtraction below may wrap around.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    // ... (attempts for heaps with a non-zero base elided)
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at an arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

#endif // _LP64

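// Note: UseCompressedOops can only be enabled on 64-bit (_LP64) builds, which is why
// the compressed-oops path below is compiled only under _LP64.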
ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
  if (UseCompressedOops) {
#ifdef _LP64
    return reserve_compressed_oops_heap(size, alignment, page_size);
#endif
  } else {
    return reserve_uncompressed_oops_heap(size, alignment, page_size);
  }
}

ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

// Create protection page at the beginning of the space.
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
    assert((reserved.base() != nullptr), "sanity");
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / %zd bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  return reserved.last_part(noaccess_prefix);
}

// ... (start of reserve_compressed_oops_heap elided)

  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os::vm_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp.
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
  char* heap_end_address = aligned_heap_base_min_address + size;

  bool unscaled = false;
  bool zerobased = false;
  if (!UseCompatibleCompressedOops) { // heap base is not enforced
    unscaled = (heap_end_address <= (char*)UnscaledOopHeapMax);
    zerobased = (heap_end_address <= (char*)OopEncodingHeapMax);
  }
  size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;
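  // UseCompatibleCompressedOops keeps the unscaled/zerobased placement shortcuts
  // disabled and always requests a noaccess prefix, so the reservation below always
  // includes room for the protection page (see establish_noaccess_prefix()).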

  ReservedSpace reserved{};

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (unscaled) {

      // Calc address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (zerobased &&                          // Zerobased theoretically possible.
        ((!reserved.is_reserved()) ||         // No previous try succeeded.
         (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calc address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // size is not guaranteed to be smaller than UnscaledOopHeapMax due to its type,
      // so the subtraction below may wrap around.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
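      // Starting at unscaled_end skips start addresses from which the whole heap would
      // fit below UnscaledOopHeapMax anyway; that range has already been probed by the
      // unscaled attempt above.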
      lowest_start = align_up(lowest_start, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    // ... (attempts for heaps with a non-zero base elided)
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at an arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
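    // (With UseCompatibleCompressedOops a prefix is always requested above, so this
    //  branch must not be reached in that mode; hence the assert below.)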
    assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

#endif // _LP64

ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
  if (UseCompressedOops) {
#ifdef _LP64
    return reserve_compressed_oops_heap(size, alignment, page_size);
#endif
  } else {
    return reserve_uncompressed_oops_heap(size, alignment, page_size);
  }
}

ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {