/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

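// Allocation is an RAII helper that tracks the state of a single allocation
// attempt. Its destructor either raises OutOfMemoryError (when no object was
// allocated) or posts the post-allocation notifications (low memory detector,
// JFR, DTrace, JVMTI).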
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::cast(allocator._thread)), // Do not use Allocation in non-JavaThreads.
      _obj_ptr(obj_ptr),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0)
  {
    assert(Thread::current() == allocator._thread, "do not pass MemAllocator across threads");
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

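// PreserveObj protects the newly allocated object across a possible safepoint
// by stashing it in a Handle, and publishes the (possibly relocated) oop back
// to *obj_ptr when the scope ends.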
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = nullptr;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

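// Returns false if an object was allocated. Otherwise reports the heap OOM
// condition and leaves an OutOfMemoryError pending (the return value then
// comes from the THROW_OOP_ macro).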
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != nullptr) {
    return false;
  }

  const char* message = "Java heap space";
  if (!_thread->is_in_internal_oome_mark()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);
    if (JvmtiExport::should_post_resource_exhausted()) {
#ifdef CHECK_UNHANDLED_OOPS
      // obj is null, no need to handle, but CheckUnhandledOops is not aware of null
      THREAD->allow_unhandled_oop(_obj_ptr);
#endif // CHECK_UNHANDLED_OOPS
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }

    THROW_OOP_(Universe::out_of_memory_error_java_heap(), true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_java_heap_without_backtrace(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation. Memory allocation might not
  // take out a lock when allocating from the TLAB, so clear here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  DEBUG_ONLY(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed");
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  _thread->check_for_valid_safepoint_state();
}
#endif

void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  ThreadHeapSampler& heap_sampler = _thread->heap_sampler();
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  // Log sample decision
  heap_sampler.log_sample_decision(tlab.top());

  if (heap_sampler.should_sample(tlab.top())) {
    // If we want to be sampling, protect the allocated object with a Handle
    // before doing the callback. The callback is done in the destructor of
    // the JvmtiSampledObjectAllocEventCollector.
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;

    // Perform the sampling
    heap_sampler.sample(obj_h(), tlab.top());

    // Note that after this point the TLAB may have been retired and agent
    // code may have run and allocated; don't rely on earlier calculations
    // involving the TLAB.
  }

  // Set a new sampling point if it fits in the current TLAB
  const size_t words_until_sample = heap_sampler.bytes_until_sample(tlab.top()) / HeapWordSize;
  if (words_until_sample <= tlab.free()) {
    tlab.set_sampling_point(tlab.top() + words_until_sample);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

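// Send JFR allocation events: outside-TLAB allocations are always reported;
// in-TLAB allocations are reported only when this allocation caused a TLAB
// refill.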
void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != nullptr && klass->name() != nullptr) {
      SharedRuntime::dtrace_object_alloc(_thread, obj(), word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

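// Allocate directly from the shared heap, bypassing the TLAB, and account the
// allocated bytes against the thread and its heap sampler.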
HeapWord* MemAllocator::mem_allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size);
  if (mem == nullptr) {
    return mem;
  }

  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);
  _thread->heap_sampler().inc_outside_tlab_bytes(size_in_bytes);

  return mem;
}

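// Fast path: bump-pointer allocation from the current TLAB; returns null if
// the request does not fit.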
HeapWord* MemAllocator::mem_allocate_inside_tlab_fast() const {
  return _thread->tlab().allocate(_word_size);
}

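// Slow path: either keep the current TLAB (and let the caller fall back to a
// shared-heap allocation), or retire it and refill a new TLAB large enough
// for this request.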
HeapWord* MemAllocator::mem_allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = nullptr;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    // When sampling we artificially set the TLAB end to the sample point.
    // When we hit that point it looks like the TLAB is full, but it's
    // not necessarily the case. Set the real end and retry the allocation.

    // Undo previous adjustment of end.
    // Note that notify_allocation_jvmti_sampler will set a new sample point.
    tlab.set_back_allocation_end();

    // Retry the TLAB allocation with the proper end
    mem = tlab.allocate(_word_size);

    if (mem != nullptr) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return nullptr;
  }

  // Discard tlab and allocate a new one.

  // Record the amount wasted
  tlab.record_refill_waste();

  // Retire the current TLAB
  _thread->retire_tlab();

  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  if (new_tlab_size == 0) {
    return nullptr;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == nullptr) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: %zu"
           ", desired: %zu, actual: %zu",
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return nullptr;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: %zu, desired: %zu",
         p2i(mem), min_tlab_size, new_tlab_size);

  // ...and clear or zap the just-allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
  }

  _thread->fill_tlab(mem, _word_size, allocation._allocated_tlab_size);

  return mem;
}

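// Allocation strategy: try the TLAB fast path first, then the TLAB refill
// slow path, and finally fall back to a direct allocation from the shared
// heap.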
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    // Try allocating from an existing TLAB.
    HeapWord* mem = mem_allocate_inside_tlab_fast();
    if (mem != nullptr) {
      return mem;
    }
  }

  // Allocation of an oop can always invoke a safepoint.
  DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state());

  if (UseTLAB) {
    // Try refilling the TLAB and allocating the object in it.
    HeapWord* mem = mem_allocate_inside_tlab_slow(allocation);
    if (mem != nullptr) {
      return mem;
    }
  }

  return mem_allocate_outside_tlab(allocation);
}

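// Allocate and initialize an object, or return null with an OutOfMemoryError
// pending. Callers typically construct a concrete allocator (for example
// ObjAllocator or ObjArrayAllocator) for a klass and word size and then call
// allocate(); the Allocation RAII object takes care of OOM reporting and the
// post-allocation notifications when it goes out of scope.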
oop MemAllocator::allocate() const {
  oop obj = nullptr;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != nullptr) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to null if mem is null.
      obj = nullptr;
    }
  }
  return obj;
}

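// Zero the object payload past the header, and clear the klass gap if the
// current header layout has one.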
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != nullptr, "cannot initialize null object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  if (oopDesc::has_klass_gap()) {
    oopDesc::set_klass_gap(mem, 0);
  }
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != nullptr, "null object pointer");
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-null, for
  // concurrent collectors.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header());
  } else {
    if (Arguments::is_valhalla_enabled()) {
      oopDesc::set_mark(mem, _klass->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    oopDesc::release_set_klass(mem, _klass);
  }
  return cast_to_oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-null klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
    mem_zap_start_padding(mem);
    mem_zap_end_padding(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

#ifndef PRODUCT
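// Debug-only: fill the alignment gap between the end of the array header and
// the first element with the heap padding byte value.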
void ObjArrayAllocator::mem_zap_start_padding(HeapWord* mem) const {
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes();

  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address header_end = reinterpret_cast<address>(mem) + header_size_in_bytes;

  if (header_end < base) {
    const size_t padding_in_bytes = base - header_end;
    Copy::fill_to_bytes(header_end, padding_in_bytes, heapPaddingByteVal);
  }
}

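// Debug-only: fill the alignment gap between the end of the array elements
// and the end of the object with the heap padding byte value.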
void ObjArrayAllocator::mem_zap_end_padding(HeapWord* mem) const {
  const size_t length_in_bytes = static_cast<size_t>(_length) << ArrayKlass::cast(_klass)->log2_element_size();
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t size_in_bytes = _word_size * BytesPerWord;

  const address obj_end = reinterpret_cast<address>(mem) + size_in_bytes;
  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address elements_end = base + length_in_bytes;
  assert(elements_end <= obj_end, "payload must fit in object");
  if (elements_end < obj_end) {
    const size_t padding_in_bytes = obj_end - elements_end;
    Copy::fill_to_bytes(elements_end, padding_in_bytes, heapPaddingByteVal);
  }
}
#endif

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-null _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_word_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, _word_size);
  return finish(mem);
}