1 /*
  2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "memory/allocation.hpp"
 26 #include "memory/allocation.inline.hpp"
 27 #include "memory/arena.hpp"
 28 #include "memory/metaspace.hpp"
 29 #include "memory/resourceArea.hpp"
 30 #include "nmt/memTracker.hpp"
 31 #include "runtime/os.hpp"
 32 #include "runtime/task.hpp"
 33 #include "utilities/ostream.hpp"
 34 
 35 // allocate using malloc; will fail if no memory available
 36 char* AllocateHeap(size_t size,
 37                    MemTag mem_tag,
 38                    const NativeCallStack& stack,
 39                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
 40   char* p = (char*) os::malloc(size, mem_tag, stack);
 41   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 42     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
 43   }
 44   return p;
 45 }
 46 
// Convenience overload without an explicit call stack.
// NOTE: CALLER_PC must be expanded here, in the wrapper itself, so that the
// recorded native call stack points at this function's caller.
char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
}
 52 
 53 char* ReallocateHeap(char *old,
 54                      size_t size,
 55                      MemTag mem_tag,
 56                      AllocFailType alloc_failmode) {
 57   char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
 58   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 59     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
 60   }
 61   return p;
 62 }
 63 
// Releases memory obtained via AllocateHeap/ReallocateHeap.
// handles null pointers
void FreeHeap(void* p) {
  os::free(p);
}
 68 
// [base, top) bounds of the shared metaspace region. Initialized elsewhere
// (not in this file); both remain nullptr until that setup runs.
void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top  = nullptr;
 71 
// Allocates 'word_size' words from metaspace for a MetaspaceObj subtype.
// Note: the C++ 'size' argument (bytes) is ignored; the allocation size is
// 'word_size' words as passed to Metaspace::allocate().
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false, THREAD);
}
 79 
// TRAPS-less variant, restricted (by assert) to non-Java threads.
// As above, 'size' (bytes) is ignored; 'word_size' words are allocated.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type) throw() {
  assert(!Thread::current()->is_Java_thread(), "only allowed by non-Java thread");
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false);
}
 87 
 88 // This is used for allocating training data. We are allocating training data in many cases where a GC cannot be triggered.
 89 void* MetaspaceObj::operator new(size_t size, MemTag flags) {
 90   void* p = AllocateHeap(size, flags, CALLER_PC);
 91   memset(p, 0, size);
 92   return p;
 93 }
 94 
 95 bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
 96   // Weed out obvious bogus values first without traversing metaspace
 97   if ((size_t)p < os::min_page_size()) {
 98     return false;
 99   } else if (!is_aligned((address)p, sizeof(MetaWord))) {
100     return false;
101   }
102   return Metaspace::contains((void*)p);
103 }
104 
// Prints this object's address in braces, e.g. " {0x00007f...}".
void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" PTR_FORMAT "}", p2i(this));
}
108 
109 //
110 // ArenaObj
111 //
112 
// Arena-backed allocation: storage comes from (and is freed with) the arena.
void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
  return arena->Amalloc(size);
}
116 
117 //
118 // AnyObj
119 //
120 
// Arena allocation; in debug builds, tag the storage so the object's origin
// can later be verified (see set_allocation_type / get_allocation_type).
void* AnyObj::operator new(size_t size, Arena *arena) {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}
126 
// C-heap allocation; uses the default EXIT_OOM strategy, so 'res' is never
// null when tagged below.
void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
  DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  return res;
}
132 
133 void* AnyObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
134     MemTag mem_tag) throw() {
135   // should only call this with std::nothrow, use other operator new() otherwise
136     address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
137     DEBUG_ONLY(if (res!= nullptr) set_allocation_type(res, C_HEAP);)
138   return res;
139 }
140 
// Frees a C-heap-allocated AnyObj; deleting null is a no-op.
void AnyObj::operator delete(void* p) {
  if (p == nullptr) {
    return;
  }
  assert(((AnyObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  // Zap the debug tag word before freeing so stale pointers trip the
  // verification asserts instead of passing them.
  DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}
150 
151 #ifdef ASSERT
// Tags the object at 'res' with its allocation origin. The low bits of the
// (aligned) address are free, so _allocation_t[0] stores ~(address + type);
// get_allocation_type() reverses this encoding.
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
164 
// Decodes the allocation origin stored by set_allocation_type().
AnyObj::allocation_type AnyObj::get_allocation_type() const {
  // The address part of ~_allocation_t[0] must match 'this', otherwise the
  // tag was never set (or the object was corrupted/moved).
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}
169 
// Returns true if _allocation_t[1] still holds the verification value that
// set_allocation_type() writes from operator new(), i.e. operator new() ran
// for this object and the value has not been zapped or overwritten.
bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type()  == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
175 
176 // This whole business of passing information from AnyObj::operator new
177 // to the AnyObj constructor via fields in the "object" is technically UB.
178 // But it seems to work within the limitations of HotSpot usage (such as no
179 // multiple inheritance) with the compilers and compiler options we're using.
180 // And it gives some possibly useful checking for misuse of AnyObj.
void AnyObj::initialize_allocation_info() {
  // _allocation_t[0] does not encode this object's address:
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}
204 
// Default constructor: classify this object's allocation origin.
AnyObj::AnyObj() {
  initialize_allocation_info();
}
208 
// Copy constructor: the copy gets its own allocation classification.
AnyObj::AnyObj(const AnyObj&) {
  // Initialize _allocation_t as a new object, ignoring object being copied.
  initialize_allocation_info();
}
213 
// Assignment: only permitted into stack/embedded objects; the target keeps
// its own allocation tag (assignment must not change where an object lives).
AnyObj& AnyObj::operator=(const AnyObj& r) {
  assert(allocated_on_stack_or_embedded(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep current _allocation_t value;
  return *this;
}
221 
// Destructor zaps the tag for non-C-heap objects so use-after-destroy is
// detectable; C-heap objects are zapped by operator delete() instead.
AnyObj::~AnyObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // AnyObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
228 #endif // ASSERT
229 
230 //--------------------------------------------------------------------------------------
231 // Non-product code
232 
233 #ifndef PRODUCT
// Convenience wrapper: print to the tty stream.
void AnyObj::print() const       { print_on(tty); }
235 
// Prints a one-line description containing this object's address.
void AnyObj::print_on(outputStream* st) const {
  st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
}
239 
// Records the current resource-area nesting level (debug builds only) so
// that check() can later detect growth across a nested ResourceMark.
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}
246 
247 void ReallocMark::check(Arena* arena) {
248 #ifdef ASSERT
249   if ((arena == nullptr || arena == Thread::current()->resource_area()) &&
250       _nesting != Thread::current()->resource_area()->nesting()) {
251     fatal("allocation bug: array could grow within nested ResourceMark");
252   }
253 #endif
254 }
255 
256 #endif // Non-product