/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

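// RAII helper that brackets a single object allocation attempt. The
// constructor verifies that the current JavaThread may allocate; the
// destructor either throws OutOfMemoryError (when no object was installed)
// or posts the post-allocation notifications: low memory detector, JFR,
// DTrace and the JVMTI sampler.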
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _overhead_limit_exceeded;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::cast(allocator._thread)), // Do not use Allocation in non-JavaThreads.
      _obj_ptr(obj_ptr),
      _overhead_limit_exceeded(false),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0)
  {
    assert(Thread::current() == allocator._thread, "do not pass MemAllocator across threads");
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

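// Keeps the freshly allocated object alive in a Handle across code that can
// safepoint (such as the JVMTI sampled object allocation callback), and
// writes the possibly updated oop back through the caller's oop* on
// destruction.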
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = nullptr;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

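// Returns false if an object was installed, otherwise throws the matching
// OutOfMemoryError and returns true (via the THROW_OOP_ macro).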
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != nullptr) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!_thread->is_in_internal_oome_mark()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);
    if (JvmtiExport::should_post_resource_exhausted()) {
#ifdef CHECK_UNHANDLED_OOPS
      // obj is null, no need to handle, but CheckUnhandledOops is not aware of null
      THREAD->allow_unhandled_oop(_obj_ptr);
#endif // CHECK_UNHANDLED_OOPS
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }

    oop exception = _overhead_limit_exceeded ?
      Universe::out_of_memory_error_gc_overhead_limit() :
      Universe::out_of_memory_error_java_heap();
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_java_heap_without_backtrace(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation. Memory allocation might
  // not take out a lock if from tlab, so clear here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  DEBUG_ONLY(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed");
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  _thread->check_for_valid_safepoint_state();
}
#endif

void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  ThreadHeapSampler& heap_sampler = _thread->heap_sampler();
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  // Log sample decision
  heap_sampler.log_sample_decision(tlab.top());

  if (heap_sampler.should_sample(tlab.top())) {
    // If we want to be sampling, protect the allocated object with a Handle
    // before doing the callback. The callback is done in the destructor of
    // the JvmtiSampledObjectAllocEventCollector.
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;

    // Perform the sampling
    heap_sampler.sample(obj_h(), tlab.top());

    // Note that after this point the TLAB may have been retired, and agent
    // code may have run and allocated; don't rely on earlier calculations
    // involving the TLAB.
  }

  // Set a new sampling point in the TLAB if it fits in the current TLAB
  const size_t words_until_sample = heap_sampler.bytes_until_sample(tlab.top()) / HeapWordSize;
  if (words_until_sample <= tlab.free()) {
    tlab.set_sampling_point(tlab.top() + words_until_sample);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != nullptr && klass->name() != nullptr) {
      SharedRuntime::dtrace_object_alloc(_thread, obj(), word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

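// Allocate straight from the shared heap, bypassing the TLAB, and credit
// the bytes to the thread's allocation statistics and its heap sampler.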
HeapWord* MemAllocator::mem_allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
  if (mem == nullptr) {
    return mem;
  }

  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);
  _thread->heap_sampler().inc_outside_tlab_bytes(size_in_bytes);

  return mem;
}

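// Fast path: bump-pointer allocation from the thread's current TLAB;
// returns null if the TLAB has insufficient free space.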
HeapWord* MemAllocator::mem_allocate_inside_tlab_fast() const {
  return _thread->tlab().allocate(_word_size);
}

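// Slow path, taken when the current TLAB cannot satisfy the request:
// either keep the TLAB (when discarding it would waste too much of it) and
// report failure so the caller allocates in the shared heap, or retire it
// and refill a fresh TLAB to carve the object from.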
HeapWord* MemAllocator::mem_allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = nullptr;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    // When sampling we artificially set the TLAB end to the sample point.
    // When we hit that point it looks like the TLAB is full, but it's
    // not necessarily the case. Set the real end and retry the allocation.

    // Undo previous adjustment of end.
    // Note that notify_allocation_jvmti_sampler will set a new sample point.
    tlab.set_back_allocation_end();

    // Retry the TLAB allocation with the proper end
    mem = tlab.allocate(_word_size);

    if (mem != nullptr) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return nullptr;
  }

  // Discard tlab and allocate a new one.

  // Record the amount wasted
  tlab.record_refill_waste();

  // Retire the current TLAB
  _thread->retire_tlab();

  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  if (new_tlab_size == 0) {
    return nullptr;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == nullptr) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: %zu"
           ", desired: %zu, actual: %zu",
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return nullptr;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: %zu, desired: %zu",
         p2i(mem), min_tlab_size, new_tlab_size);

  // ...and clear or zap just allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
  }

  _thread->fill_tlab(mem, _word_size, allocation._allocated_tlab_size);

  return mem;
}

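// Tiered allocation: try the TLAB fast path, then a TLAB refill, and
// finally fall back to an allocation outside any TLAB.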
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    // Try allocating from an existing TLAB.
    HeapWord* mem = mem_allocate_inside_tlab_fast();
    if (mem != nullptr) {
      return mem;
    }
  }

  // Allocation of an oop can always invoke a safepoint.
  DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state());

  if (UseTLAB) {
    // Try refilling the TLAB and allocating the object in it.
    HeapWord* mem = mem_allocate_inside_tlab_slow(allocation);
    if (mem != nullptr) {
      return mem;
    }
  }

  return mem_allocate_outside_tlab(allocation);
}

oop MemAllocator::allocate() const {
  oop obj = nullptr;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != nullptr) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to null if mem is null.
      obj = nullptr;
    }
  }
  return obj;
}

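// Zero the object payload, i.e. everything past the header. The klass gap,
// when present, lies within the header and is cleared explicitly.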
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != nullptr, "cannot initialize null object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  if (oopDesc::has_klass_gap()) {
    oopDesc::set_klass_gap(mem, 0);
  }
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != nullptr, "null object pointer");
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-null, for
  // concurrent collectors.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, _klass);
  }
  return cast_to_oop(mem);
}

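// The concrete allocators below provide initialize(), which must publish a
// fully parsable object. Typical use, as in CollectedHeap::obj_allocate():
//   ObjAllocator allocator(klass, size, THREAD);
//   return allocator.allocate();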
oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-null klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
    mem_zap_start_padding(mem);
    mem_zap_end_padding(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

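// Non-PRODUCT helpers: fill alignment padding before the first array
// element and after the last one with heapPaddingByteVal so padding bytes
// are recognizable when debugging heap contents.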
#ifndef PRODUCT
void ObjArrayAllocator::mem_zap_start_padding(HeapWord* mem) const {
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes();

  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address header_end = reinterpret_cast<address>(mem) + header_size_in_bytes;

  if (header_end < base) {
    const size_t padding_in_bytes = base - header_end;
    Copy::fill_to_bytes(header_end, padding_in_bytes, heapPaddingByteVal);
  }
}

void ObjArrayAllocator::mem_zap_end_padding(HeapWord* mem) const {
  const size_t length_in_bytes = static_cast<size_t>(_length) << ArrayKlass::cast(_klass)->log2_element_size();
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t size_in_bytes = _word_size * BytesPerWord;

  const address obj_end = reinterpret_cast<address>(mem) + size_in_bytes;
  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address elements_end = base + length_in_bytes;
  assert(elements_end <= obj_end, "payload must fit in object");
  if (elements_end < obj_end) {
    const size_t padding_in_bytes = obj_end - elements_end;
    Copy::fill_to_bytes(elements_end, padding_in_bytes, heapPaddingByteVal);
  }
}
#endif

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-null _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_base_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, _base_size);
  return finish(mem);
}