/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"

// Allocate using malloc; unless RETURN_NULL is requested, the VM exits if no memory is available.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   const NativeCallStack& stack,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM */) {
  char* p = (char*) os::malloc(size, flags, stack);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM */) {
  return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
}
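
// Illustrative usage (a sketch, not from this file; mtInternal is one of the
// standard MEMFLAGS categories). A caller that can recover from allocation
// failure passes AllocFailStrategy::RETURN_NULL and checks the result:
//
//   char* buf = AllocateHeap(1024, mtInternal, AllocFailStrategy::RETURN_NULL);
//   if (buf == nullptr) {
//     // handle the failure; with the default EXIT_OOM mode the VM would
//     // have exited instead of returning nullptr.
//   }
//   ...
//   FreeHeap(buf);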

char* ReallocateHeap(char *old,
                     size_t size,
                     MEMFLAGS flag,
                     AllocFailType alloc_failmode) {
  char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

// handles null pointers
void FreeHeap(void* p) {
  os::free(p);
}

void* MetaspaceObj::_shared_metaspace_base = nullptr;
void* MetaspaceObj::_shared_metaspace_top  = nullptr;

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, type, THREAD);
}

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type) throw() {
  assert(!Thread::current()->is_Java_thread(), "only allowed for non-Java threads");
  return Metaspace::allocate(loader_data, word_size, type);
}

bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
  // Weed out obvious bogus values first without traversing metaspace
  if ((size_t)p < os::min_page_size()) {
    return false;
  } else if (!is_aligned((address)p, sizeof(MetaWord))) {
    return false;
  }
  return Metaspace::contains((void*)p);
}
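
// Illustrative use (a sketch, not from this file): a debug-only sanity check
// before trusting a pointer that should reference metaspace, e.g.
//
//   assert(MetaspaceObj::is_valid(obj), "not a valid metaspace object");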

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" PTR_FORMAT "}", p2i(this));
}

//
// ArenaObj
//

void* ArenaObj::operator new(size_t size, Arena *arena) throw() {
  return arena->Amalloc(size);
}

//
// AnyObj
//

void* AnyObj::operator new(size_t size, Arena *arena) {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* AnyObj::operator new(size_t size, MEMFLAGS flags) throw() {
  address res = (address)AllocateHeap(size, flags, CALLER_PC);
  DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  return res;
}

void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                           MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
  DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, C_HEAP);)
  return res;
}
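
// Illustrative usage (a sketch; Foo stands for a hypothetical AnyObj subclass):
//
//   Foo* f = new (std::nothrow, mtInternal) Foo();
//   if (f == nullptr) {
//     // allocation failed; no VM exit, the caller must cope
//   }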

void AnyObj::operator delete(void* p) {
  if (p == nullptr) {
    return;
  }
  assert(((AnyObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((AnyObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void AnyObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  AnyObj* resobj = (AnyObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
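
// A note on the encoding above (descriptive): _allocation_t[0] holds the
// bitwise complement of (object address + type). The object is at least
// 4-byte aligned, so the low allocation_mask bits of its address are zero
// and the type tag fits into them; get_allocation_type() below undoes the
// complement and masks the low bits back out.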

AnyObj::allocation_type AnyObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool AnyObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

// This whole business of passing information from AnyObj::operator new
// to the AnyObj constructor via fields in the "object" is technically UB.
// But it seems to work within the limitations of HotSpot usage (such as no
// multiple inheritance) with the compilers and compiler options we're using.
// And it gives some possibly useful checking for misuse of AnyObj.
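//
// An illustrative lifecycle (a sketch; Foo stands for a hypothetical AnyObj
// subclass):
//
//   Foo* heap_obj = new (mtInternal) Foo(); // operator new tags the memory
//                                           // C_HEAP; AnyObj() then verifies it.
//   Foo stack_obj;                          // no operator new ran, so the
//                                           // constructor tags STACK_OR_EMBEDDED.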
void AnyObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations on the stack
    // or for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack_or_embedded()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack_or_embedded(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

AnyObj::AnyObj() {
  initialize_allocation_info();
}

AnyObj::AnyObj(const AnyObj&) {
  // Initialize _allocation_t as a new object, ignoring the object being copied.
  initialize_allocation_info();
}

AnyObj& AnyObj::operator=(const AnyObj& r) {
  assert(allocated_on_stack_or_embedded(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep the current _allocation_t value.
  return *this;
}

AnyObj::~AnyObj() {
  // allocated_on_C_heap() also checks that the address encoded in _allocation_t[0] == this.
  if (!allocated_on_C_heap()) { // AnyObj::operator delete() zaps _allocation_t[0] for C_HEAP objects.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void AnyObj::print() const       { print_on(tty); }

void AnyObj::print_on(outputStream* st) const {
  st->print_cr("AnyObj(" PTR_FORMAT ")", p2i(this));
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
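
// Illustrative usage (a sketch, not from this file): take a ReallocMark when
// a growable resource-allocated array is created, and call check() before
// each reallocation, so that growth under a nested ResourceMark (whose pop
// would free the newly grown memory too early) is caught in debug builds:
//
//   ReallocMark rm;   // records the current ResourceArea nesting level
//   ...
//   rm.check();       // fatal if a nested ResourceMark was entered since
//   // ... now safe to grow the array in the resource area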

#endif // Non-product