src/hotspot/share/memory/virtualspace.cpp

 517          "size not aligned to os::vm_allocation_granularity()");
 518   assert((alignment & (granularity - 1)) == 0,
 519          "alignment not aligned to os::vm_allocation_granularity()");
 520   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 521          "not a power of 2");
 522 
 523   // The necessary attach point alignment for generated wish addresses.
 524   // This is needed to increase the chance of attaching for mmap and shmat.
 525   // AIX is the only platform that uses System V shm for reserving virtual memory.
 526   // In this case, the required alignment of the allocated size (64K) and the alignment
 527   // of possible start points of the memory region (256M) differ.
 528   // This is not reflected by os_allocation_granularity().
 529   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
 530   const size_t os_attach_point_alignment =
 531     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
 532     NOT_AIX(os::vm_allocation_granularity());
 533 
 534   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
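
For example (illustrative numbers only, not taken from this change): with a heap alignment of 8M and the AIX shmat attach-point alignment of 256M, lcm(8M, 256M) = 256M, so wish addresses are generated on 256M boundaries, which satisfies both constraints at once. On non-AIX platforms the lcm usually just collapses to the heap alignment, since that is already a multiple of the allocation granularity.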
 535 
 536   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 537   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 538     noaccess_prefix_size(alignment) : 0;
 539 
 540   // Attempt to alloc at user-given address.
 541   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 542     try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
 543     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 544       release();
 545     }
 546   }
 547 
 548   // Keep heap at HeapBaseMinAddress.
 549   if (_base == nullptr) {
 550 
 551     // Try to allocate the heap at addresses that allow efficient oop compression.
 552     // Different schemes are tried, in order of decreasing optimization potential.
 553     //
 554     // For this, try_reserve_heap() is called with the desired heap base addresses.
 555     // A call into the os layer to allocate at a given address can return memory
 556     // at a different address than requested.  Still, this might be memory at a useful
 557     // address. try_reserve_heap() always returns this allocated memory, as only here
 558     // the criteria for a good heap are checked.
 559 
 560     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 561     // Give it several tries from top of range to bottom.
 562     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 563 
 564       // Calc address range within which we try to attach (range of possible start addresses).
 565       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 566       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 567       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 568                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
 569     }
 570 
 571     // zerobased: Attempt to allocate in the lower 32G.
 572     char *zerobased_max = (char *)OopEncodingHeapMax;
 573 
 574     // Give it several tries from top of range to bottom.
 575     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 576         ((_base == nullptr) ||                        // No previous try succeeded.
 577          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 578 
 579       // Calc address range within which we try to attach (range of possible start addresses).
 580       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 581       // Need to be careful: size is not guaranteed to be less than
 582       // UnscaledOopHeapMax due to type constraints, so the subtraction below may wrap.
 583       char *lowest_start = aligned_heap_base_min_address;
 584       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 585       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 586         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 587       }
 588       lowest_start = align_up(lowest_start, attach_point_alignment);
 589       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 590                         aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
 591     }
 592 
 593     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 594     // implement null checks.
 595     noaccess_prefix = noaccess_prefix_size(alignment);
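
For context (an illustration, not part of this change): the attach ranges tried above correspond to the usual compressed-oops decode modes, from cheapest to most expensive: unscaled (heap ends below 4G, i.e. UnscaledOopHeapMax), zero-based (heap ends below 32G, i.e. OopEncodingHeapMax with 8-byte object alignment), and heap-based with a no-access prefix. A minimal decode sketch, assuming a 64-bit VM and an object-alignment shift of 3; the function name is hypothetical and this is not the HotSpot implementation:

#include <cstdint>

// Illustration of the three encoding modes the reservation code tries to enable.
static inline void* decode_narrow_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
  // unscaled:   heap fits below 4G  -> heap_base == 0, shift == 0, decode is a no-op
  // zero-based: heap fits below 32G -> heap_base == 0, decode is a single shift
  // heap-based: anything else       -> decode is heap_base + (narrow << shift);
  //             an access through a decoded null then lands in the no-access
  //             prefix at heap_base and faults like a real null dereference.
  return (void*)(heap_base + ((uint64_t)narrow << shift));
}

This is why the code first probes below UnscaledOopHeapMax, then below OopEncodingHeapMax, and only here falls back to an arbitrary base plus a no-access prefix.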

 626   if (heap_allocation_directory != nullptr) {
 627     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 628     if (_fd_for_heap == -1) {
 629       vm_exit_during_initialization(
 630         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 631     }
 632     // When there is a backing file directory for this space, whether
 633     // large pages are allocated is up to the filesystem of the backing file.
 634     // If requested, let the user know that explicit large pages can't be used.
 635     if (use_explicit_large_pages(page_size) && large_pages_requested()) {
 636       log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
 637     }
 638   }
 639 
 640   // Heap size should be aligned to alignment, too.
 641   guarantee(is_aligned(size, alignment), "set by caller");
 642 
 643   if (UseCompressedOops) {
 644 #ifdef _LP64
 645     initialize_compressed_heap(size, alignment, page_size);
 646     if (_size > size) {
 647       // We allocated heap with noaccess prefix.
 648       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
 649       // if we had to try at an arbitrary address.
 650       establish_noaccess_prefix();
 651     }
 652 #else
 653     ShouldNotReachHere();
 654 #endif // _LP64
 655   } else {
 656     initialize(size, alignment, page_size, nullptr, false);
 657   }
 658 
 659   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 660          "area must be distinguishable from marks for mark-sweep");
 661   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 662          "area must be distinguishable from marks for mark-sweep");
 663 
 664   if (base() != nullptr) {
 665     MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
 666   }

 517          "size not aligned to os::vm_allocation_granularity()");
 518   assert((alignment & (granularity - 1)) == 0,
 519          "alignment not aligned to os::vm_allocation_granularity()");
 520   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 521          "not a power of 2");
 522 
 523   // The necessary attach point alignment for generated wish addresses.
 524   // This is needed to increase the chance of attaching for mmap and shmat.
 525   // AIX is the only platform that uses System V shm for reserving virtual memory.
 526   // In this case, the required alignment of the allocated size (64K) and the alignment
 527   // of possible start points of the memory region (256M) differ.
 528   // This is not reflected by os_allocation_granularity().
 529   // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
 530   const size_t os_attach_point_alignment =
 531     AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
 532     NOT_AIX(os::vm_allocation_granularity());
 533 
 534   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 535 
 536   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 537   size_t noaccess_prefix = (((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) LP64_ONLY(|| UseCompatibleCompressedOops)) ?
 538     noaccess_prefix_size(alignment) : 0;
 539 
 540   // Attempt to alloc at user-given address.
 541   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) LP64_ONLY(|| UseCompatibleCompressedOops)) {
 542     try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
 543     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 544       release();
 545     }
 546   }
 547 
 548   // Keep heap at HeapBaseMinAddress.
 549   if (_base == nullptr) {
 550 
 551     // Try to allocate the heap at addresses that allow efficient oop compression.
 552     // Different schemes are tried, in order of decreasing optimization potential.
 553     //
 554     // For this, try_reserve_heap() is called with the desired heap base addresses.
 555     // A call into the os layer to allocate at a given address can return memory
 556     // at a different address than requested.  Still, this might be memory at a useful
 557     // address. try_reserve_heap() always returns this allocated memory, as only here
 558     // the criteria for a good heap are checked.
 559 
 560     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 561     // Give it several tries from top of range to bottom.
 562     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax LP64_ONLY(&& !UseCompatibleCompressedOops)) {
 563 
 564       // Calc address range within which we try to attach (range of possible start addresses).
 565       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 566       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 567       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 568                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
 569     }
 570 
 571     // zerobased: Attempt to allocate in the lower 32G.
 572     char *zerobased_max = (char *)OopEncodingHeapMax;
 573 
 574     // Give it several tries from top of range to bottom.
 575     if (LP64_ONLY(!UseCompatibleCompressedOops &&)
 576         aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 577         ((_base == nullptr) ||                        // No previous try succeeded.
 578          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 579 
 580       // Calc address range within which we try to attach (range of possible start addresses).
 581       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 582       // Need to be careful: size is not guaranteed to be less than
 583       // UnscaledOopHeapMax due to type constraints, so the subtraction below may wrap.
 584       char *lowest_start = aligned_heap_base_min_address;
 585       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 586       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 587         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 588       }
 589       lowest_start = align_up(lowest_start, attach_point_alignment);
 590       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 591                         aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
 592     }
 593 
 594     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 595     // implement null checks.
 596     noaccess_prefix = noaccess_prefix_size(alignment);
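
The repeated "give it several tries from top of range to bottom" is what try_reserve_range() does with the highest_start/lowest_start pair and the attach-point alignment computed above. A simplified, Linux-flavoured sketch of that probing (an assumption-laden illustration only; the real code also limits the number of probes and reserves through the os:: layer rather than calling mmap directly):

#include <sys/mman.h>
#include <cstddef>

// Walk wish addresses from highest_start down to lowest_start in steps of the
// attach-point alignment and keep the first reservation that really falls
// inside [lowest_start, upper_bound).
static char* probe_range(char* highest_start, char* lowest_start,
                         size_t attach_step, char* upper_bound, size_t size) {
  for (char* wish = highest_start; wish >= lowest_start; wish -= attach_step) {
    void* got = mmap(wish, size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (got == MAP_FAILED) {
      continue;                              // nothing available near this wish address
    }
    char* base = (char*)got;                 // the kernel may ignore the address hint
    if (base >= lowest_start && base + size <= upper_bound) {
      return base;                           // usable for the desired oop encoding
    }
    munmap(got, size);                       // outside the range, try the next address
  }
  return nullptr;
}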

 627   if (heap_allocation_directory != nullptr) {
 628     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 629     if (_fd_for_heap == -1) {
 630       vm_exit_during_initialization(
 631         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 632     }
 633     // When there is a backing file directory for this space, whether
 634     // large pages are allocated is up to the filesystem of the backing file.
 635     // If requested, let the user know that explicit large pages can't be used.
 636     if (use_explicit_large_pages(page_size) && large_pages_requested()) {
 637       log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
 638     }
 639   }
 640 
 641   // Heap size should be aligned to alignment, too.
 642   guarantee(is_aligned(size, alignment), "set by caller");
 643 
 644   if (UseCompressedOops) {
 645 #ifdef _LP64
 646     initialize_compressed_heap(size, alignment, page_size);
 647     if (_size > size LP64_ONLY(|| UseCompatibleCompressedOops)) {
 648       // We allocated heap with noaccess prefix.
 649       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
 650       // if we had to try at an arbitrary address.
 651       establish_noaccess_prefix();
 652     }
 653 #else
 654     ShouldNotReachHere();
 655 #endif // _LP64
 656   } else {
 657     initialize(size, alignment, page_size, nullptr, false);
 658   }
 659 
 660   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 661          "area must be distinguishable from marks for mark-sweep");
 662   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 663          "area must be distinguishable from marks for mark-sweep");
 664 
 665   if (base() != nullptr) {
 666     MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
 667   }
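
The no-access prefix that establish_noaccess_prefix() sets up can be pictured with a small POSIX-style sketch (an illustration under assumptions, not the HotSpot code, which goes through the os:: layer and keeps its own bookkeeping):

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// After reserving size + prefix bytes, make the first 'prefix' bytes
// inaccessible. The compressed-oops base stays at reserved_base while the
// usable Java heap starts at reserved_base + prefix, so any access through a
// decoded null lands in the protected prefix and faults like a null dereference.
static void protect_noaccess_prefix(char* reserved_base, size_t prefix) {
  assert(prefix > 0 && "prefix must cover at least one protected page");
  int rc = mprotect(reserved_base, prefix, PROT_NONE);
  assert(rc == 0 && "mprotect of the no-access prefix failed");
  (void)rc;  // silence unused-variable warnings in release builds
}

The _size > size check before establish_noaccess_prefix() is what detects that the reservation actually carries these extra prefix bytes.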