/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"
// Allocate using os::malloc; by default the VM exits if no memory is
// available, unless alloc_failmode is AllocFailStrategy::RETURN_NULL.
char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   const NativeCallStack& stack,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  char* p = (char*) os::malloc(size, mem_tag, stack);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
}

char* ReallocateHeap(char *old,
                     size_t size,
                     MemTag mem_tag,
                     AllocFailType alloc_failmode) {
  char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

// Handles null pointers.
void FreeHeap(void* p) {
  os::free(p);
}
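
// Illustrative usage of the entry points above (a sketch, not code from this
// file; mtInternal stands in here for whatever NMT tag fits the call site):
//
//   char* buf = AllocateHeap(1024, mtInternal);                // exits the VM on OOM
//   char* opt = AllocateHeap(1024, mtInternal,
//                            AllocFailStrategy::RETURN_NULL);  // may return null
//   opt = ReallocateHeap(opt, 2048, mtInternal,
//                        AllocFailStrategy::RETURN_NULL);
//   FreeHeap(buf);
//   FreeHeap(opt);  // null-safe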

void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top  = nullptr;

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false, THREAD);
}

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type) throw() {
  assert(!Thread::current()->is_Java_thread(), "only allowed from non-Java threads");
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false);
}
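
// Given the placement signatures above, a metadata subclass would be allocated
// roughly like this (illustrative sketch; MyMetadata is a hypothetical type and
// SymbolType merely one example of a MetaspaceObj::Type value):
//
//   MyMetadata* m = new (loader_data, word_size, MetaspaceObj::SymbolType, THREAD)
//                       MyMetadata(...);
//
// The storage lives in the metaspace of the given ClassLoaderData and is
// reclaimed when that class loader is unloaded, not via operator delete.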

bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
  // Weed out obvious bogus values first without traversing metaspace
  if ((size_t)p < os::min_page_size()) {
    return false;
  } else if (!is_aligned((address)p, sizeof(MetaWord))) {
    return false;
  }
  return Metaspace::contains((void*)p);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" PTR_FORMAT "}", p2i(this));
}

//
// ArenaObj
//

void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
  return arena->Amalloc(size);
}
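
// Classes derived from ArenaObj are allocated with placement new into an
// arena, e.g. (illustrative sketch; Node is a hypothetical type):
//
//   Node* n = new (arena) Node(...);
//
// There is no matching operator delete: arena storage is reclaimed wholesale
// when the arena itself is reset or destroyed.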

//
// AnyObj
//

void* AnyObj::operator new(size_t size, Arena *arena) {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
  DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  return res;
}

void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                           MemTag mem_tag) throw() {
  // Should only be called with std::nothrow; use the other operator new() otherwise.
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
  DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, C_HEAP);)
  return res;
}
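
// Illustrative contrast of the two C-heap variants above (Foo is a
// hypothetical type):
//
//   Foo* a = new (mtInternal) Foo();                 // exits the VM on OOM
//   Foo* b = new (std::nothrow, mtInternal) Foo();   // returns null on OOM
//   if (b == nullptr) { /* caller handles the failure */ }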

void AnyObj::operator delete(void* p) {
  if (p == nullptr) {
    return;
  }
  assert(((AnyObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
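
// Worked example of the encoding (illustrative; assumes allocation_mask == 0x3,
// consistent with the 4-byte-alignment assert above). For an object at address
// 0x1000 with type value t (t <= 3):
//
//   _allocation_t[0] = ~(0x1000 + t)
//   type recovery:    (~_allocation_t[0]) & 0x3  == t
//   address recovery: ~(_allocation_t[0] | 0x3)  == 0x1000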

AnyObj::allocation_type AnyObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

// This whole business of passing information from AnyObj::operator new
// to the AnyObj constructor via fields in the "object" is technically UB.
// But it seems to work within the limitations of HotSpot usage (such as no
// multiple inheritance) with the compilers and compiler options we're using.
// And it gives some possibly useful checking for misuse of AnyObj.
void AnyObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it, since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and the type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap the verification value
}

AnyObj::AnyObj() {
  initialize_allocation_info();
}

AnyObj::AnyObj(const AnyObj&) {
  // Initialize _allocation_t as a new object, ignoring the object being copied.
  initialize_allocation_info();
}

AnyObj& AnyObj::operator=(const AnyObj& r) {
  assert(allocated_on_stack_or_embedded(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep the current _allocation_t value.
  return *this;
}

AnyObj::~AnyObj() {
  // allocated_on_C_heap() also checks that the address encoded in _allocation_t == this.
  if (!allocated_on_C_heap()) { // AnyObj::operator delete() will zap _allocation_t for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void AnyObj::print() const       { print_on(tty); }

void AnyObj::print_on(outputStream* st) const {
  st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check(Arena* arena) {
#ifdef ASSERT
  if ((arena == nullptr || arena == Thread::current()->resource_area()) &&
      _nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
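
// Typical use (illustrative sketch): capture the nesting level when a growable
// resource-area array is created, then re-check before every in-place grow:
//
//   ReallocMark rm;    // records the current ResourceArea nesting level
//   ...
//   rm.check(nullptr); // asserts no nested ResourceMark has been pushed since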

#endif // Non-product