1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "memory/allocation.hpp"
 27 #include "memory/allocation.inline.hpp"
 28 #include "memory/arena.hpp"
 29 #include "memory/metaspace.hpp"
 30 #include "memory/resourceArea.hpp"
 31 #include "nmt/memTracker.hpp"
 32 #include "runtime/os.hpp"
 33 #include "runtime/task.hpp"
 34 #include "runtime/threadCritical.hpp"
 35 #include "utilities/ostream.hpp"
 36 
 37 // allocate using malloc; will fail if no memory available
 38 char* AllocateHeap(size_t size,
 39                    MemTag mem_tag,
 40                    const NativeCallStack& stack,
 41                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
 42   char* p = (char*) os::malloc(size, mem_tag, stack);
 43   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 44     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
 45   }
 46   return p;
 47 }
 48 
// Convenience overload: records the immediate caller (CALLER_PC) as the
// NMT allocation site, then delegates to the stack-taking variant above.
char* AllocateHeap(size_t size,
                   MemTag mem_tag,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
}
 54 
 55 char* ReallocateHeap(char *old,
 56                      size_t size,
 57                      MemTag mem_tag,
 58                      AllocFailType alloc_failmode) {
 59   char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
 60   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 61     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
 62   }
 63   return p;
 64 }
 65 
// Release memory obtained from AllocateHeap/ReallocateHeap.
// handles null pointers
void FreeHeap(void* p) {
  // os::free tolerates nullptr, so no explicit check is needed here.
  os::free(p);
}
 70 
// Cached [base, top) bounds of the shared metaspace region, initialized to
// nullptr here.  NOTE(review): the assignment sites are not visible in this
// file — presumably set during shared/CDS metaspace initialization.
void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top  = nullptr;
 73 
// Allocate 'word_size' words for a non-class MetaspaceObj from the metaspace
// of 'loader_data'.  THREAD is passed through to Metaspace::allocate (TRAPS
// variant).  Note: the byte-sized 'size' from the new-expression is ignored;
// metaspace allocation is driven by 'word_size'.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false, THREAD);
}
 81 
// Same as the TRAPS variant above but for non-Java threads, which have no
// exception context; calls the non-TRAPS Metaspace::allocate overload.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type) throw() {
  assert(!Thread::current()->is_Java_thread(), "only allowed by non-Java thread");
  assert(type != ClassType, "class has its own operator new");
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false);
}
 89 
 90 bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
 91   // Weed out obvious bogus values first without traversing metaspace
 92   if ((size_t)p < os::min_page_size()) {
 93     return false;
 94   } else if (!is_aligned((address)p, sizeof(MetaWord))) {
 95     return false;
 96   }
 97   return Metaspace::contains((void*)p);
 98 }
 99 
// Print this object's address in the form " {0x...}" to 'st'.
void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" PTR_FORMAT "}", p2i(this));
}
103 
104 //
105 // ArenaObj
106 //
107 
// Placement new drawing storage from the given arena.
void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
  return arena->Amalloc(size);
}
111 
112 //
113 // AnyObj
114 //
115 
// Arena placement new; debug builds tag the object as ARENA-allocated so the
// allocation-type checks in this file can verify correct usage later.
void* AnyObj::operator new(size_t size, Arena *arena) {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}
121 
// C-heap new.  AllocateHeap's default fail mode is EXIT_OOM, so 'res' is
// guaranteed non-null when the debug allocation tag is set.
void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
  DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  return res;
}
127 
128 void* AnyObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
129     MemTag mem_tag) throw() {
130   // should only call this with std::nothrow, use other operator new() otherwise
131     address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
132     DEBUG_ONLY(if (res!= nullptr) set_allocation_type(res, C_HEAP);)
133   return res;
134 }
135 
// Only C-heap allocated AnyObj instances may be deleted; arena and
// stack/embedded objects are reclaimed by other means (asserted in debug).
void AnyObj::operator delete(void* p) {
  if (p == nullptr) {
    return;
  }
  assert(((AnyObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  // Zap the debug allocation tag so stale use of the freed object is caught.
  DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}
145 
146 #ifdef ASSERT
// Encode the allocation type into the (not yet constructed) object at 'res':
// _allocation_t[0] holds the bitwise complement of (address + type), which
// works because the low 'allocation_mask' bits of an aligned address are free.
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    // _allocation_t[1] encodes its own address plus the type, so is_type_set()
    // can later verify that operator new() really initialized this object.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
159 
// Decode the type stored by set_allocation_type(); the assert also verifies
// that the address encoded in _allocation_t[0] is still 'this'.
AnyObj::allocation_type AnyObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}
164 
// Returns true iff both words written by set_allocation_type() are mutually
// consistent, i.e. operator new() really set the verification value.
bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type()  == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
170 
171 // This whole business of passing information from AnyObj::operator new
172 // to the AnyObj constructor via fields in the "object" is technically UB.
173 // But it seems to work within the limitations of HotSpot usage (such as no
174 // multiple inheritance) with the compilers and compiler options we're using.
175 // And it gives some possibly useful checking for misuse of AnyObj.
// Called from the AnyObj constructors: classify how this object was
// allocated, defaulting to STACK_OR_EMBEDDED when operator new() left no
// consistent trace in _allocation_t.
void AnyObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}
199 
// Default constructor (debug only): record how this object was allocated.
AnyObj::AnyObj() {
  initialize_allocation_info();
}
203 
// Copy constructor (debug only): the copy's allocation kind is its own, not
// the source's, so the info is initialized from scratch.
AnyObj::AnyObj(const AnyObj&) {
  // Initialize _allocation_t as a new object, ignoring object being copied.
  initialize_allocation_info();
}
208 
// Copy assignment (debug only): permitted only into stack/embedded objects;
// the target's own allocation info is left untouched.
AnyObj& AnyObj::operator=(const AnyObj& r) {
  assert(allocated_on_stack_or_embedded(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep current _allocation_t value;
  return *this;
}
216 
// Destructor (debug only): zap the allocation tag for non-C-heap objects so
// use-after-destruction is detectable; operator delete zaps C-heap objects.
AnyObj::~AnyObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // AnyObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
223 #endif // ASSERT
224 
225 //--------------------------------------------------------------------------------------
226 // Non-product code
227 
228 #ifndef PRODUCT
// Debug print to the default tty stream.
void AnyObj::print() const       { print_on(tty); }
230 
// Print a minimal identification (class name and address) to 'st'.
void AnyObj::print_on(outputStream* st) const {
  st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
}
234 
// Capture the current thread's resource-area nesting level (debug only) so a
// later check() can detect growth across a nested ResourceMark.
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}
241 
// Fail fatally (debug only) if the resource-area nesting level changed since
// construction — reallocating under a nested ResourceMark would let freed
// memory be handed out again while still referenced.
void ReallocMark::check(Arena* arena) {
#ifdef ASSERT
  // A null arena is treated the same as the current thread's resource area.
  if ((arena == nullptr || arena == Thread::current()->resource_area()) &&
      _nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
250 
251 #endif // Non-product