1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "memory/allocation.hpp"
 27 #include "memory/allocation.inline.hpp"
 28 #include "memory/arena.hpp"
 29 #include "memory/metaspace.hpp"
 30 #include "memory/resourceArea.hpp"
 31 #include "nmt/memTracker.hpp"
 32 #include "runtime/os.hpp"
 33 #include "runtime/task.hpp"
 34 #include "runtime/threadCritical.hpp"
 35 #include "utilities/ostream.hpp"
 36 
 37 // allocate using malloc; will fail if no memory available
 38 char* AllocateHeap(size_t size,
 39                    MEMFLAGS flags,
 40                    const NativeCallStack& stack,
 41                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
 42   char* p = (char*) os::malloc(size, flags, stack);
 43   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 44     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
 45   }
 46   return p;
 47 }
 48 
 49 char* AllocateHeap(size_t size,
 50                    MEMFLAGS flags,
 51                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
 52   return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
 53 }
 54 
 55 char* ReallocateHeap(char *old,
 56                      size_t size,
 57                      MEMFLAGS flag,
 58                      AllocFailType alloc_failmode) {
 59   char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
 60   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 61     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
 62   }
 63   return p;
 64 }
 65 
 66 // handles null pointers
 67 void FreeHeap(void* p) {
 68   os::free(p);
 69 }
 70 
// [base, top) bounds of the shared metaspace region used by is_shared()
// checks; nullptr until set — presumably during VM/archive initialization
// (set elsewhere; not visible in this file).
void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top  = nullptr;
 73 
 74 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
 75                                  size_t word_size,
 76                                  MetaspaceObj::Type type, TRAPS) throw() {
 77   // Klass has its own operator new
 78   return Metaspace::allocate(loader_data, word_size, type, THREAD);
 79 }
 80 
 81 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
 82                                  size_t word_size,
 83                                  MetaspaceObj::Type type) throw() {
 84   assert(!Thread::current()->is_Java_thread(), "only allowed by non-Java thread");
 85   return Metaspace::allocate(loader_data, word_size, type);
 86 }
 87 
 88 
 89 // Work-around -- see JDK-8331086
 90 void* MetaspaceObj::operator new(size_t size, MEMFLAGS flags) throw() {
 91   void* p = AllocateHeap(size, flags, CALLER_PC);
 92   memset(p, 0, size);
 93   return p;
 94 }
 95 
 96 bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
 97   // Weed out obvious bogus values first without traversing metaspace
 98   if ((size_t)p < os::min_page_size()) {
 99     return false;
100   } else if (!is_aligned((address)p, sizeof(MetaWord))) {
101     return false;
102   }
103   return Metaspace::contains((void*)p);
104 }
105 
106 void MetaspaceObj::print_address_on(outputStream* st) const {
107   st->print(" {" PTR_FORMAT "}", p2i(this));
108 }
109 
110 //
111 // ArenaObj
112 //
113 
114 void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
115   return arena->Amalloc(size);
116 }
117 
118 //
119 // AnyObj
120 //
121 
122 void* AnyObj::operator new(size_t size, Arena *arena) {
123   address res = (address)arena->Amalloc(size);
124   DEBUG_ONLY(set_allocation_type(res, ARENA);)
125   return res;
126 }
127 
128 void* AnyObj::operator new(size_t size, MEMFLAGS flags) throw() {
129   address res = (address)AllocateHeap(size, flags, CALLER_PC);
130   DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
131   return res;
132 }
133 
134 void* AnyObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
135     MEMFLAGS flags) throw() {
136   // should only call this with std::nothrow, use other operator new() otherwise
137     address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
138     DEBUG_ONLY(if (res!= nullptr) set_allocation_type(res, C_HEAP);)
139   return res;
140 }
141 
142 void AnyObj::operator delete(void* p) {
143   if (p == nullptr) {
144     return;
145   }
146   assert(((AnyObj *)p)->allocated_on_C_heap(),
147          "delete only allowed for C_HEAP objects");
148   DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
149   FreeHeap(p);
150 }
151 
152 #ifdef ASSERT
// Tag the object at 'res' with its allocation type by encoding it into
// _allocation_t[0]. The encoding is ~(address + type): the object address
// is aligned, so its low 'allocation_mask' bits are free to carry the type,
// and the bitwise complement makes an accidental match with uninitialized
// stack garbage unlikely (see get_allocation_type()).
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    // The verification word encodes its own address plus the type; checked
    // later by is_type_set().
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
165 
// Decode the allocation type from _allocation_t[0] (inverse of the
// ~(address + type) encoding written by set_allocation_type()).
AnyObj::allocation_type AnyObj::get_allocation_type() const {
  // Sanity: the encoded address must match 'this', otherwise the tag is lost.
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}
170 
// Return true iff operator new() stamped both words consistently: the type
// recovered from _allocation_t[0] matches the one in the verification word
// _allocation_t[1], and that word encodes its own address (see
// set_allocation_type()).
bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type()  == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
176 
// This whole business of passing information from AnyObj::operator new
// to the AnyObj constructor via fields in the "object" is technically UB.
// But it seems to work within the limitations of HotSpot usage (such as no
// multiple inheritance) with the compilers and compiler options we're using.
// And it gives some possibly useful checking for misuse of AnyObj.
//
// Called from the constructors: decide whether this object was created by
// one of our operator new() overloads (which pre-tagged it) or lives on the
// stack / is embedded in another object (no tag yet).
void AnyObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}
205 
206 AnyObj::AnyObj() {
207   initialize_allocation_info();
208 }
209 
210 AnyObj::AnyObj(const AnyObj&) {
211   // Initialize _allocation_t as a new object, ignoring object being copied.
212   initialize_allocation_info();
213 }
214 
215 AnyObj& AnyObj::operator=(const AnyObj& r) {
216   assert(allocated_on_stack_or_embedded(),
217          "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
218          p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
219   // Keep current _allocation_t value;
220   return *this;
221 }
222 
223 AnyObj::~AnyObj() {
224   // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
225   if (!allocated_on_C_heap()) { // AnyObj::delete() will zap _allocation for C_heap.
226     _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
227   }
228 }
229 #endif // ASSERT
230 
231 //--------------------------------------------------------------------------------------
232 // Non-product code
233 
234 #ifndef PRODUCT
235 void AnyObj::print() const       { print_on(tty); }
236 
237 void AnyObj::print_on(outputStream* st) const {
238   st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
239 }
240 
241 ReallocMark::ReallocMark() {
242 #ifdef ASSERT
243   Thread *thread = Thread::current();
244   _nesting = thread->resource_area()->nesting();
245 #endif
246 }
247 
248 void ReallocMark::check() {
249 #ifdef ASSERT
250   if (_nesting != Thread::current()->resource_area()->nesting()) {
251     fatal("allocation bug: array could grow within nested ResourceMark");
252   }
253 #endif
254 }
255 
256 #endif // Non-product