/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

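// RAII helper that brackets a single object allocation. The constructor
// verifies that the thread is in a valid state for allocating; the destructor
// either installs a pending OutOfMemoryError if the allocation failed, or
// posts the allocation notifications (low-memory detector, JFR, DTrace,
// JVMTI sampling) if it succeeded.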
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _overhead_limit_exceeded;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;
  bool                _tlab_end_reset_for_sample;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::cast(allocator._thread)), // Do not use Allocation in non-JavaThreads.
      _obj_ptr(obj_ptr),
      _overhead_limit_exceeded(false),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0),
      _tlab_end_reset_for_sample(false)
  {
    assert(Thread::current() == allocator._thread, "do not pass MemAllocator across threads");
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

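// Keeps the newly allocated object alive and correctly updated across a
// potential safepoint (e.g. inside the JVMTI sampling callback) by holding it
// in a Handle, and writes the current oop back to the caller's stack slot on
// destruction.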
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = nullptr;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

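// Returns false if the allocation produced an object. Otherwise reports the
// out-of-memory condition, posts the JVMTI resource-exhausted event if
// requested, installs a pending OutOfMemoryError, and returns true.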
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != nullptr) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!_thread->is_in_internal_oome_mark()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);
    if (JvmtiExport::should_post_resource_exhausted()) {
#ifdef CHECK_UNHANDLED_OOPS
      // obj is null, so there is no oop to handle, but CheckUnhandledOops is not aware of null
      THREAD->allow_unhandled_oop(_obj_ptr);
#endif // CHECK_UNHANDLED_OOPS
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }

    oop exception = _overhead_limit_exceeded ?
        Universe::out_of_memory_error_gc_overhead_limit() :
        Universe::out_of_memory_error_java_heap();
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_java_heap_without_backtrace(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take a lock when allocating from the TLAB, so clear here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  DEBUG_ONLY(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed");
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  _thread->check_for_valid_safepoint_state();
}
#endif

void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
    // Sample if it's a non-TLAB allocation, or a TLAB allocation that either refills the TLAB
    // or expands it due to taking a sampler induced slow path.
    return;
  }

  // If we want to be sampling, protect the allocated object with a Handle
  // before doing the callback. The callback is done in the destructor of
  // the JvmtiSampledObjectAllocEventCollector.
  size_t bytes_since_last = 0;

  {
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;
    size_t size_in_bytes = _allocator._word_size * HeapWordSize;
    ThreadLocalAllocBuffer& tlab = _thread->tlab();

    if (!_allocated_outside_tlab) {
      bytes_since_last = tlab.bytes_since_last_sample_point();
    }

    _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
  }

  if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
    // Tell tlab to forget bytes_since_last if we passed it to the heap sampler.
    _thread->tlab().set_sample_end(bytes_since_last != 0);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != nullptr && klass->name() != nullptr) {
      SharedRuntime::dtrace_object_alloc(_thread, obj(), word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

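// Allocate directly from the heap, bypassing the TLAB, and credit the
// allocated bytes to the thread's allocation counter.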
HeapWord* MemAllocator::mem_allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
  if (mem == nullptr) {
    return mem;
  }

  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);

  return mem;
}

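// Fast path: allocate from the thread's current TLAB.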
HeapWord* MemAllocator::mem_allocate_inside_tlab_fast() const {
  return _thread->tlab().allocate(_word_size);
}

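// Slow path: the current TLAB could not satisfy the request. Either keep the
// TLAB and report failure (the caller then allocates outside the TLAB), or
// retire it and allocate the object at the start of a fresh TLAB.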
HeapWord* MemAllocator::mem_allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = nullptr;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    tlab.set_back_allocation_end();
    mem = tlab.allocate(_word_size);

    // We moved the allocation end back to its real limit to attempt this
    // allocation; remember to re-establish the sample point when done.
    allocation._tlab_end_reset_for_sample = true;

    if (mem != nullptr) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return nullptr;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  tlab.retire_before_allocation();

  if (new_tlab_size == 0) {
    return nullptr;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == nullptr) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: %zu"
           ", desired: %zu, actual: %zu",
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return nullptr;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: %zu, desired: %zu",
         p2i(mem), min_tlab_size, new_tlab_size);

  // ...and clear or zap the just-allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
  }

  tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size);
  return mem;
}

HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    // Try allocating from an existing TLAB.
    HeapWord* mem = mem_allocate_inside_tlab_fast();
    if (mem != nullptr) {
      return mem;
    }
  }

  // Allocation of an oop can always invoke a safepoint.
  DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state());

  if (UseTLAB) {
    // Try refilling the TLAB and allocating the object in it.
    HeapWord* mem = mem_allocate_inside_tlab_slow(allocation);
    if (mem != nullptr) {
      return mem;
    }
  }

  return mem_allocate_outside_tlab(allocation);
}

oop MemAllocator::allocate() const {
  oop obj = nullptr;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != nullptr) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to null if mem is null.
      obj = nullptr;
    }
  }
  return obj;
}

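// Zero the object body after the header (and clear the klass gap when the
// header layout has one); the header itself is set up later by finish().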
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != nullptr, "cannot initialize null object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  if (oopDesc::has_klass_gap()) {
    oopDesc::set_klass_gap(mem, 0);
  }
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != nullptr, "null object pointer");
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-null, for
  // concurrent collectors.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, _klass);
  }
  return cast_to_oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-null klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
    mem_zap_start_padding(mem);
    mem_zap_end_padding(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

#ifndef PRODUCT
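// Fill the alignment padding (if any) between the end of the array header and
// the first element with the heap padding byte (non-product builds only).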
void ObjArrayAllocator::mem_zap_start_padding(HeapWord* mem) const {
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes();

  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address header_end = reinterpret_cast<address>(mem) + header_size_in_bytes;

  if (header_end < base) {
    const size_t padding_in_bytes = base - header_end;
    Copy::fill_to_bytes(header_end, padding_in_bytes, heapPaddingByteVal);
  }
}

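// Fill the padding (if any) between the end of the last element and the end of
// the object with the heap padding byte (non-product builds only).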
void ObjArrayAllocator::mem_zap_end_padding(HeapWord* mem) const {
  const size_t length_in_bytes = static_cast<size_t>(_length) << ArrayKlass::cast(_klass)->log2_element_size();
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t size_in_bytes = _word_size * BytesPerWord;

  const address obj_end = reinterpret_cast<address>(mem) + size_in_bytes;
  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address elements_end = base + length_in_bytes;
  assert(elements_end <= obj_end, "payload must fit in object");
  if (elements_end < obj_end) {
    const size_t padding_in_bytes = obj_end - elements_end;
    Copy::fill_to_bytes(elements_end, padding_in_bytes, heapPaddingByteVal);
  }
}
#endif

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-null _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_base_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, _base_size);
  return finish(mem);
}