< prev index next > src/hotspot/share/memory/virtualspace.cpp
Print this page
NOT_AIX(os::vm_allocation_granularity());
const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
! size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
noaccess_prefix_size(alignment) : 0;
// Attempt to alloc at user-given address.
! if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
release();
}
}
NOT_AIX(os::vm_allocation_granularity());
const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
! size_t noaccess_prefix = (((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) LP64_ONLY(|| UseCompatibleCompressedOops)) ?
noaccess_prefix_size(alignment) : 0;
// Attempt to alloc at user-given address.
! if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) LP64_ONLY(|| UseCompatibleCompressedOops)) {
try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
release();
}
}
// address. try_reserve_heap() always returns this allocated memory, as only here
// the criteria for a good heap are checked.
// Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
// Give it several tries from top of range to bottom.
! if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
// Calc address range within which we try to attach (range of possible start addresses).
char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
// address. try_reserve_heap() always returns this allocated memory, as only here
// the criteria for a good heap are checked.
// Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
// Give it several tries from top of range to bottom.
! if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax LP64_ONLY(&& !UseCompatibleCompressedOops)) {
// Calc address range within which we try to attach (range of possible start addresses).
char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment);
try_reserve_range(highest_start, lowest_start, attach_point_alignment,
// zerobased: Attempt to allocate in the lower 32G.
char *zerobased_max = (char *)OopEncodingHeapMax;
// Give it several tries from top of range to bottom.
! if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
((_base == nullptr) || // No previous try succeeded.
(_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
// Calc address range within which we try to attach (range of possible start addresses).
char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
// zerobased: Attempt to allocate in the lower 32G.
char *zerobased_max = (char *)OopEncodingHeapMax;
// Give it several tries from top of range to bottom.
! if (LP64_ONLY(!UseCompatibleCompressedOops &&)
+ aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible.
((_base == nullptr) || // No previous try succeeded.
(_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
// Calc address range within which we try to attach (range of possible start addresses).
char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
// Heap size should be aligned to alignment, too.
guarantee(is_aligned(size, alignment), "set by caller");
if (UseCompressedOops) {
initialize_compressed_heap(size, alignment, page_size);
! if (_size > size) {
// We allocated heap with noaccess prefix.
// It can happen we get a zerobased/unscaled heap with noaccess prefix,
// if we had to try at arbitrary address.
establish_noaccess_prefix();
}
// Heap size should be aligned to alignment, too.
guarantee(is_aligned(size, alignment), "set by caller");
if (UseCompressedOops) {
initialize_compressed_heap(size, alignment, page_size);
! if (_size > size LP64_ONLY(|| UseCompatibleCompressedOops)) {
// We allocated heap with noaccess prefix.
// It can happen we get a zerobased/unscaled heap with noaccess prefix,
// if we had to try at arbitrary address.
establish_noaccess_prefix();
}
< prev index next >