/*
 * Copyright (c) 2023, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "logging/log.hpp"
#include "oops/compressedKlass.hpp"
#include "memory/metaspace.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"

// Helper function; reserve at an address that is compatible with EOR
static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
  char* result = nullptr;

  log_debug(metaspace, map)("Trying to reserve at an EOR-compatible address");

  // We need immediates that are 32-bit aligned, since they should not intersect nKlass
  // bits. They should not be larger than the addressable space either, but we still
  // lack a good abstraction for that (see JDK-8320584), therefore we assume and hard-code
  // 2^48 as a reasonable higher ceiling.
  static const uint16_t immediates[] = {
    0x0001, 0x0002, 0x0003, 0x0004, 0x0006, 0x0007, 0x0008, 0x000c, 0x000e,
    0x000f, 0x0010, 0x0018, 0x001c, 0x001e, 0x001f, 0x0020, 0x0030, 0x0038,
    0x003c, 0x003e, 0x003f, 0x0040, 0x0060, 0x0070, 0x0078, 0x007c, 0x007e,
    0x007f, 0x0080, 0x00c0, 0x00e0, 0x00f0, 0x00f8, 0x00fc, 0x00fe, 0x00ff,
    0x0100, 0x0180, 0x01c0, 0x01e0, 0x01f0, 0x01f8, 0x01fc, 0x01fe, 0x01ff,
    0x0200, 0x0300, 0x0380, 0x03c0, 0x03e0, 0x03f0, 0x03f8, 0x03fc, 0x03fe,
    0x03ff, 0x0400, 0x0600, 0x0700, 0x0780, 0x07c0, 0x07e0, 0x07f0, 0x07f8,
    0x07fc, 0x07fe, 0x07ff, 0x0800, 0x0c00, 0x0e00, 0x0f00, 0x0f80, 0x0fc0,
    0x0fe0, 0x0ff0, 0x0ff8, 0x0ffc, 0x0ffe, 0x0fff, 0x1000, 0x1800, 0x1c00,
    0x1e00, 0x1f00, 0x1f80, 0x1fc0, 0x1fe0, 0x1ff0, 0x1ff8, 0x1ffc, 0x1ffe,
    0x1fff, 0x2000, 0x3000, 0x3800, 0x3c00, 0x3e00, 0x3f00, 0x3f80, 0x3fc0,
    0x3fe0, 0x3ff0, 0x3ff8, 0x3ffc, 0x3ffe, 0x3fff, 0x4000, 0x6000, 0x7000,
    0x7800, 0x7c00, 0x7e00, 0x7f00, 0x7f80, 0x7fc0, 0x7fe0, 0x7ff0, 0x7ff8,
    0x7ffc, 0x7ffe, 0x7fff
  };
  static constexpr unsigned num_immediates = sizeof(immediates) / sizeof(immediates[0]);
  const unsigned start_index = aslr ? os::next_random((int)os::javaTimeNanos()) : 0;
  constexpr int max_tries = 64;
  for (int ntry = 0; result == nullptr && ntry < max_tries; ntry++) {
    // As in os::attempt_reserve_memory_between, we alternate between higher and lower
    // addresses; this maximizes the chance of early success if part of the address space
    // is not accessible (e.g. 39-bit address space).
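    // Each table entry is a contiguous run of set bits and thus, shifted left by 32, still a
    // valid AArch64 logical immediate (verified by the assert below); such a base folds into
    // the decoding with a single EOR and leaves the low 32 bits free for the narrow Klass value.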
    const unsigned alt_index = (ntry & 1) ? 0 : num_immediates / 2;
    const unsigned index = (start_index + ntry + alt_index) % num_immediates;
    const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
    assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
           "Invalid immediate %d " UINT64_FORMAT, index, immediate);
    result = os::attempt_reserve_memory_at((char*)immediate, size, false);
    if (result == nullptr) {
      log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
    }
  }
  if (result == nullptr) {
    log_debug(metaspace, map)("Failed to reserve at any EOR-compatible address");
  }
  return result;
}

char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size_t size, bool aslr, bool optimize_for_zero_base) {

  char* result = nullptr;

  // Optimize for base=0 shift=0
  if (optimize_for_zero_base) {
    result = reserve_address_space_for_unscaled_encoding(size, aslr);
  }

  // If this fails, we don't bother aiming for zero-based encoding (base=0 shift>0), since it has no
  // advantages over EOR or movk mode.

  // EOR-compatible reservation
  if (result == nullptr) {
    result = reserve_at_eor_compatible_address(size, aslr);
  }

  // Movk-compatible reservation via probing.
  if (result == nullptr) {
    result = reserve_address_space_for_16bit_move(size, aslr);
  }

  // Movk-compatible reservation via overallocation.
  // If that failed, attempt to allocate at any 4G-aligned address. Let the system decide where. For ASLR,
  // we now rely on the system.
  // Compared with the probing done above, this has two disadvantages:
  // - on a kernel with 52-bit address space we may get an address that has bits set between [48, 52).
  //   In that case, we may need two movk moves (not yet implemented).
  // - this technique leads to temporary over-reservation of address space; it will spike the vsize of
  //   the process. Therefore it may fail if a vsize limit is in place (e.g. ulimit -v).
  if (result == nullptr) {
    constexpr size_t alignment = nth_bit(32);
    log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
    result = os::reserve_memory_aligned(size, alignment, false);
  }

  return result;
}

bool CompressedKlassPointers::pd_initialize(address addr, size_t len) {

  if (tiny_classpointer_mode()) {
    // In tiny-classpointer mode, we do what all other platforms do.
    return false;
  }

  // Aarch64 uses its own initialization logic, which avoids zero-based shifted mode
  // (_base=0 _shift>0) and instead prefers non-zero-based mode with shift=0.
  constexpr uintptr_t unscaled_max = nth_bit(32);
  assert(len <= unscaled_max, "Klass range larger than 32 bits?");

  _shift = 0;

  address const end = addr + len;
  _base = (end <= (address)unscaled_max) ? nullptr : addr;

  _range = end - _base;

#ifdef ASSERT
  _klass_range_start = addr;
  _klass_range_end = addr + len;
  calc_lowest_highest_narrow_klass_id();
  sanity_check_after_initialization();
#endif

  return true;
}