/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

// Allocate using os::malloc. With the default EXIT_OOM failure mode a
// failed allocation exits the VM; with RETURN_NULL the caller gets nullptr.
char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   const NativeCallStack& stack,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  char* p = (char*) os::malloc(size, mem_tag, stack);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
}

char* ReallocateHeap(char *old,
                     size_t size,
                     MemTag mem_tag,
                     AllocFailType alloc_failmode) {
  char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

// Handles null pointers.
void FreeHeap(void* p) {
  os::free(p);
}
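// Illustrative usage (a sketch, not code from this file): callers that can
// tolerate allocation failure pass AllocFailStrategy::RETURN_NULL and must
// check for nullptr themselves; under the default EXIT_OOM mode a failed
// allocation terminates the VM instead. mtInternal is just an example tag.
//
//   char* buf = AllocateHeap(len, mtInternal, AllocFailStrategy::RETURN_NULL);
//   if (buf == nullptr) {
//     // degrade gracefully instead of exiting the VM
//   }
//   ...
//   FreeHeap(buf);   // safe even if buf is nullptr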
void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top = nullptr;

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false, THREAD);
}

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type) throw() {
  assert(!Thread::current()->is_Java_thread(), "only allowed by non-Java thread");
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false);
}

// Work-around -- see JDK-8331086
void* MetaspaceObj::operator new(size_t size, MemTag flags) throw() {
  void* p = AllocateHeap(size, flags, CALLER_PC);
  memset(p, 0, size);
  return p;
}

bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
  // Weed out obvious bogus values first without traversing metaspace
  if ((size_t)p < os::min_page_size()) {
    return false;
  } else if (!is_aligned((address)p, sizeof(MetaWord))) {
    return false;
  }
  return Metaspace::contains((void*)p);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" PTR_FORMAT "}", p2i(this));
}

//
// ArenaObj
//

void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
  return arena->Amalloc(size);
}

//
// AnyObj
//

void* AnyObj::operator new(size_t size, Arena *arena) {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
  DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  return res;
}

void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                           MemTag mem_tag) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
  DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, C_HEAP);)
  return res;
}

void AnyObj::operator delete(void* p) {
  if (p == nullptr) {
    return;
  }
  assert(((AnyObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

AnyObj::allocation_type AnyObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
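// Note on the encoding above (a reading aid, derived from the code itself):
// AnyObj addresses are at least 4-byte aligned, so the low bits covered by
// allocation_mask are zero and can carry the allocation_type. Storing
// ~(address + type) in _allocation_t[0] therefore gives
//   (~_allocation_t[0]) & allocation_mask  == type      (low bits), and
//   ~(_allocation_t[0] | allocation_mask)  == address   (high bits),
// which is exactly what get_allocation_type() decodes and what the
// "lost resource object" assert re-checks.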
// This whole business of passing information from AnyObj::operator new
// to the AnyObj constructor via fields in the "object" is technically UB.
// But it seems to work within the limitations of HotSpot usage (such as no
// multiple inheritance) with the compilers and compiler options we're using.
// And it gives some possibly useful checking for misuse of AnyObj.
void AnyObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

AnyObj::AnyObj() {
  initialize_allocation_info();
}

AnyObj::AnyObj(const AnyObj&) {
  // Initialize _allocation_t as a new object, ignoring the object being copied.
  initialize_allocation_info();
}

AnyObj& AnyObj::operator=(const AnyObj& r) {
  assert(allocated_on_stack_or_embedded(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep the current _allocation_t value.
  return *this;
}

AnyObj::~AnyObj() {
  // allocated_on_C_heap() also checks that the encoded (in _allocation_t) address == this.
  if (!allocated_on_C_heap()) { // AnyObj::operator delete() will zap _allocation_t for C_heap objects.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void AnyObj::print() const { print_on(tty); }

void AnyObj::print_on(outputStream* st) const {
  st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check(Arena* arena) {
#ifdef ASSERT
  if ((arena == nullptr || arena == Thread::current()->resource_area()) &&
      _nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
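// Illustrative use of ReallocMark (a sketch, not code from this file):
// code that may grow a resource-area-backed array records the nesting level
// when the mark is constructed and re-checks it before each growth step, so
// growing across a nested ResourceMark is caught in debug builds. The growth
// macro and loop shape below are assumptions for illustration only.
//
//   ReallocMark rm;          // snapshots resource_area()->nesting()
//   while (len < needed) {
//     rm.check(nullptr);     // nullptr: validate against the thread's resource area
//     buf = REALLOC_RESOURCE_ARRAY(char, buf, len, len * 2);
//     len *= 2;
//   }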