/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/javaClasses.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

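// RAII helper that brackets a single object allocation. Its destructor either
// throws OutOfMemoryError when the allocation failed, or posts the allocation
// notifications (low memory detector, JFR, DTrace, JVMTI sampling) when it
// succeeded.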
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::cast(allocator._thread)), // Do not use Allocation in non-JavaThreads.
      _obj_ptr(obj_ptr),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0)
  {
    assert(Thread::current() == allocator._thread, "do not pass MemAllocator across threads");
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

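// Keeps the newly allocated object alive and up to date across a potential
// safepoint: the oop is stashed in a Handle for the duration of the scope and
// written back to *obj_ptr by the destructor.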
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = nullptr;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

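// Returns false if the allocation produced an object. Otherwise throws
// OutOfMemoryError; unless the thread is inside an internal OOME mark, the
// condition is first reported (heap dump / OnOutOfMemoryError handling) and a
// JVMTI resource-exhausted event may be posted.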
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != nullptr) {
    return false;
  }

  const char* message = "Java heap space";
  if (!_thread->is_in_internal_oome_mark()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);
    if (JvmtiExport::should_post_resource_exhausted()) {
#ifdef CHECK_UNHANDLED_OOPS
      // obj is null, so there is nothing to handle, but CheckUnhandledOops is not aware of null
      THREAD->allow_unhandled_oop(_obj_ptr);
#endif // CHECK_UNHANDLED_OOPS
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }

    THROW_OOP_(Universe::out_of_memory_error_java_heap(), true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_java_heap_without_backtrace(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation. Memory allocation might
  // not take out a lock when allocating from the TLAB, so clear here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  DEBUG_ONLY(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed");
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  _thread->check_for_valid_safepoint_state();
}
#endif

void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  ThreadHeapSampler& heap_sampler = _thread->heap_sampler();
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  // Log the sample decision
  heap_sampler.log_sample_decision(tlab.top());

  if (heap_sampler.should_sample(tlab.top())) {
    // If we want to be sampling, protect the allocated object with a Handle
    // before doing the callback. The callback is done in the destructor of
    // the JvmtiSampledObjectAllocEventCollector.
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;

    // Perform the sampling
    heap_sampler.sample(obj_h(), tlab.top());

    // Note that after this point the TLAB may have been retired and agent
    // code may have run and allocated; don't rely on earlier calculations
    // involving the TLAB.
  }

  // Set a new sampling point if it fits within the current TLAB
  const size_t words_until_sample = heap_sampler.bytes_until_sample(tlab.top()) / HeapWordSize;
  if (words_until_sample <= tlab.free()) {
    tlab.set_sampling_point(tlab.top() + words_until_sample);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != nullptr && klass->name() != nullptr) {
      SharedRuntime::dtrace_object_alloc(_thread, obj(), word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

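// Allocate directly from the heap, bypassing the TLAB, and account the
// allocated bytes to the thread and its heap sampler.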
HeapWord* MemAllocator::mem_allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size);
  if (mem == nullptr) {
    return mem;
  }

  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);
  _thread->heap_sampler().inc_outside_tlab_bytes(size_in_bytes);

  return mem;
}

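// Fast path: bump-pointer allocation from the thread's current TLAB.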
HeapWord* MemAllocator::mem_allocate_inside_tlab_fast() const {
  return _thread->tlab().allocate(_word_size);
}

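// Slow path: the current TLAB could not satisfy the request. Either keep the
// TLAB and let the caller allocate outside of it (when too much of the TLAB
// would be wasted by retiring it), or retire it and allocate a fresh one
// large enough for the object.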
HeapWord* MemAllocator::mem_allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = nullptr;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    // When sampling we artificially set the TLAB end to the sample point.
    // When we hit that point it looks like the TLAB is full, but it's
    // not necessarily the case. Set the real end and retry the allocation.

    // Undo previous adjustment of end.
    // Note that notify_allocation_jvmti_sampler will set a new sample point.
    tlab.set_back_allocation_end();

    // Retry the TLAB allocation with the proper end
    mem = tlab.allocate(_word_size);

    if (mem != nullptr) {
      return mem;
    }
  }

  // Retain the TLAB and allocate the object in shared space if
  // the amount free in the TLAB is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return nullptr;
  }

  // Discard the TLAB and allocate a new one.

  // Record the amount wasted
  tlab.record_refill_waste();

  // Retire the current TLAB
  _thread->retire_tlab();

  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  if (new_tlab_size == 0) {
    return nullptr;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between min_tlab_size and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == nullptr) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: %zu"
           ", desired: %zu, actual: %zu",
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return nullptr;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: %zu, desired: %zu",
         p2i(mem), min_tlab_size, new_tlab_size);

  // ...and clear or zap the just-allocated TLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
  }

  _thread->fill_tlab(mem, _word_size, allocation._allocated_tlab_size);

  return mem;
}

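// Allocation strategy: try the TLAB fast path first, then try to refill the
// TLAB, and finally fall back to allocating directly from the heap.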
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    // Try allocating from an existing TLAB.
    HeapWord* mem = mem_allocate_inside_tlab_fast();
    if (mem != nullptr) {
      return mem;
    }
  }

  // Allocation of an oop can always invoke a safepoint.
  DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state());

  if (UseTLAB) {
    // Try refilling the TLAB and allocating the object in it.
    HeapWord* mem = mem_allocate_inside_tlab_slow(allocation);
    if (mem != nullptr) {
      return mem;
    }
  }

  return mem_allocate_outside_tlab(allocation);
}

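// Entry point for a single object allocation. The Allocation RAII helper
// takes care of out-of-memory handling and allocation notifications. A
// concrete allocator is typically driven along these lines (illustrative
// sketch only; assumes the ObjAllocator constructor declared in
// memAllocator.hpp):
//
//   ObjAllocator allocator(klass, word_size, THREAD);
//   oop obj = allocator.allocate();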
oop MemAllocator::allocate() const {
  oop obj = nullptr;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != nullptr) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to null if mem is null.
      obj = nullptr;
    }
  }
  return obj;
}

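// Zero the object body past the header; the klass gap, if the layout has one,
// is cleared explicitly.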
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != nullptr, "cannot initialize null object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  if (oopDesc::has_klass_gap()) {
    oopDesc::set_klass_gap(mem, 0);
  }
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != nullptr, "null object pointer");
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-null, for
  // concurrent collectors.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, _klass->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, _klass);
  }
  return cast_to_oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-null klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
    mem_zap_start_padding(mem);
    mem_zap_end_padding(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

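// In non-product builds, fill the alignment padding around the array payload
// (between the header and the first element, and between the last element and
// the end of the object) with heapPaddingByteVal.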
#ifndef PRODUCT
void ObjArrayAllocator::mem_zap_start_padding(HeapWord* mem) const {
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t header_size_in_bytes = arrayOopDesc::header_size_in_bytes();

  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address header_end = reinterpret_cast<address>(mem) + header_size_in_bytes;

  if (header_end < base) {
    const size_t padding_in_bytes = base - header_end;
    Copy::fill_to_bytes(header_end, padding_in_bytes, heapPaddingByteVal);
  }
}

void ObjArrayAllocator::mem_zap_end_padding(HeapWord* mem) const {
  const size_t length_in_bytes = static_cast<size_t>(_length) << ArrayKlass::cast(_klass)->log2_element_size();
  const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
  const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
  const size_t size_in_bytes = _word_size * BytesPerWord;

  const address obj_end = reinterpret_cast<address>(mem) + size_in_bytes;
  const address base = reinterpret_cast<address>(mem) + base_offset_in_bytes;
  const address elements_end = base + length_in_bytes;
  assert(elements_end <= obj_end, "payload must fit in object");
  if (elements_end < obj_end) {
    const size_t padding_in_bytes = obj_end - elements_end;
    Copy::fill_to_bytes(elements_end, padding_in_bytes, heapPaddingByteVal);
  }
}
#endif

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-null _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_word_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, _word_size);
  return finish(mem);
}
--- EOF ---