/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

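// RAII helper that brackets a single object allocation. The constructor runs the
// pre-allocation verification; the destructor either installs an OutOfMemoryError
// when no memory was obtained, or runs the post-allocation verification and the
// JFR/JVMTI/DTrace/low-memory notifications for a successful allocation.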
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _overhead_limit_exceeded;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;
  bool                _tlab_end_reset_for_sample;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
  void check_for_bad_heap_word_value() const;
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::current()),
      _obj_ptr(obj_ptr),
      _overhead_limit_exceeded(false),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0),
      _tlab_end_reset_for_sample(false)
  {
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      verify_after();
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

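// Keeps the newly allocated object alive and up-to-date across a possible safepoint
// by storing it in a Handle. The raw oop is cleared while the Handle is in use and
// the (possibly moved) object is written back when the scope is exited.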
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = NULL;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

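// Returns false if the allocation succeeded (obj() is non-NULL); otherwise installs
// the appropriate OutOfMemoryError as the pending exception and returns true.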
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != NULL) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!_thread->in_retryable_allocation()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }
    oop exception = _overhead_limit_exceeded ?
        Universe::out_of_memory_error_gc_overhead_limit() :
        Universe::out_of_memory_error_java_heap();
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_retry(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
}

void MemAllocator::Allocation::verify_after() {
  NOT_PRODUCT(check_for_bad_heap_word_value();)
}

void MemAllocator::Allocation::check_for_bad_heap_word_value() const {
  MemRegion obj_range = _allocator.obj_memory_range(obj());
  HeapWord* addr = obj_range.start();
  size_t size = obj_range.word_size();
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  JavaThread::cast(_thread)->check_for_valid_safepoint_state();
}
#endif

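// Posts the JVMTI VMObjectAlloc event and, if sampled object allocation events are
// enabled, lets the thread's heap sampler decide whether to sample this allocation.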
void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
    // Sample if it's a non-TLAB allocation, or a TLAB allocation that either refills the TLAB
    // or expands it due to taking a sampler induced slow path.
    return;
  }

  // If we want to be sampling, protect the allocated object with a Handle
  // before doing the callback. The callback is done in the destructor of
  // the JvmtiSampledObjectAllocEventCollector.
  size_t bytes_since_last = 0;

  {
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;
    size_t size_in_bytes = _allocator._word_size * HeapWordSize;
    ThreadLocalAllocBuffer& tlab = _thread->tlab();

    if (!_allocated_outside_tlab) {
      bytes_since_last = tlab.bytes_since_last_sample_point();
    }

    _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
  }

  if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
    // Tell tlab to forget bytes_since_last if we passed it to the heap sampler.
    _thread->tlab().set_sample_end(bytes_since_last != 0);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != NULL && klass->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(Thread::current(), obj(), word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

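// Allocates directly from the heap, outside any TLAB, and accounts the allocated
// bytes to the current thread.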
HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
  if (mem == NULL) {
    return mem;
  }

  NOT_PRODUCT(Universe::heap()->check_for_non_bad_heap_word_value(mem, _word_size));
  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);

  return mem;
}

HeapWord* MemAllocator::allocate_inside_tlab(Allocation& allocation) const {
  assert(UseTLAB, "should use UseTLAB");

  // Try allocating from an existing TLAB.
  HeapWord* mem = allocate_inside_tlab_fast();
  if (mem != NULL) {
    return mem;
  }

  // Try refilling the TLAB and allocating the object in it.
  return allocate_inside_tlab_slow(allocation);
}

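// Fast path: bump-pointer allocation from the thread's current TLAB; returns NULL
// if the request does not fit.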
HeapWord* MemAllocator::allocate_inside_tlab_fast() const {
  return _thread->tlab().allocate(_word_size);
}

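// Slow path, taken when the current TLAB cannot satisfy the request: either allocate
// past the sampling watermark, keep the TLAB and return NULL so the caller allocates
// outside the TLAB, or retire the TLAB and refill it with a new one.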
HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = NULL;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    tlab.set_back_allocation_end();
    mem = tlab.allocate(_word_size);

    // We set back the allocation sample point to try to allocate this, reset it
    // when done.
    allocation._tlab_end_reset_for_sample = true;

    if (mem != NULL) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  tlab.retire_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == NULL) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: " SIZE_FORMAT
           ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return NULL;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
         p2i(mem), min_tlab_size, new_tlab_size);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }

  tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size);
  return mem;
}

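// Try a TLAB allocation first and fall back to allocating directly from the heap.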
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    HeapWord* result = allocate_inside_tlab(allocation);
    if (result != NULL) {
      return result;
    }
  }

  return allocate_outside_tlab(allocation);
}

oop MemAllocator::allocate() const {
  oop obj = NULL;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != NULL) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to NULL if mem is NULL.
      obj = NULL;
    }
  }
  return obj;
}

oop MemAllocator::try_allocate_in_existing_tlab() {
  oop obj = NULL;
  {
    HeapWord* mem = allocate_inside_tlab_fast();
    if (mem != NULL) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to NULL if mem is NULL.
      obj = NULL;
    }
  }
  return obj;
}

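// Zeroes the object body, leaving the header words untouched.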
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != NULL, "NULL object pointer");
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
#ifdef _LP64
  oopDesc::release_set_mark(mem, _klass->prototype_header());
#else
  oopDesc::set_mark(mem, _klass->prototype_header());
  oopDesc::release_set_klass(mem, _klass);
#endif
  return cast_to_oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

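// Memory range used by the post-allocation verification. For arrays whose body is
// left unzeroed, the range excludes the header words.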
MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
  if (_do_zero) {
    return MemAllocator::obj_memory_range(obj);
  }
  ArrayKlass* array_klass = ArrayKlass::cast(_klass);
  const size_t hs = align_up(arrayOopDesc::base_offset_in_bytes(array_klass->element_type()), HeapWordSize) / HeapWordSize;
  return MemRegion(cast_from_oop<HeapWord*>(obj) + hs, _word_size - hs);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_word_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, _word_size);
  return finish(mem);
}

// Does the minimal amount of initialization needed for a TLAB allocation.
// We don't need to do a full initialization, as such an allocation need not be immediately walkable.
oop StackChunkAllocator::initialize(HeapWord* mem) const {
  assert(_stack_size > 0, "");
  assert(_stack_size <= max_jint, "");
  assert(_word_size > _stack_size, "");

  // zero out fields (but not the stack)
  const size_t hs = oopDesc::header_size();
  Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);

  jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
  jdk_internal_vm_StackChunk::set_sp(mem, (int)_stack_size);

  return finish(mem);
}