/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

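// RAII helper accompanying a single allocation attempt. Its destructor either
// raises OutOfMemoryError (when no memory was obtained) or runs the
// post-allocation notifications (low-memory detector, JFR, DTrace, JVMTI).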
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  JavaThread*         _thread;
  oop*                _obj_ptr;
  bool                _overhead_limit_exceeded;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;
  bool                _tlab_end_reset_for_sample;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(JavaThread::current()),
      _obj_ptr(obj_ptr),
      _overhead_limit_exceeded(false),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0),
      _tlab_end_reset_for_sample(false)
  {
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

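// Preserves the newly allocated object across a potential safepoint: the oop
// is stashed in a Handle for the duration of the scope and written back
// (possibly updated by GC) when the scope exits.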
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(JavaThread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = NULL;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

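// Returns false if the allocation succeeded. Otherwise reports and throws the
// matching OutOfMemoryError, or, if the thread is in a retryable allocation,
// throws the preallocated retry error without reporting.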
bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != NULL) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!_thread->in_retryable_allocation()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }
    oop exception = _overhead_limit_exceeded ?
        Universe::out_of_memory_error_gc_overhead_limit() :
        Universe::out_of_memory_error_java_heap();
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_retry(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation. A TLAB allocation might not
  // take out a lock, so the unhandled oops are cleared here.
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  // Allocation of an oop can always invoke a safepoint.
  // (_thread is already a JavaThread*, so no as_Java_thread() cast is needed.)
  _thread->check_for_valid_safepoint_state();
}
#endif

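// Posts the JVMTI SampledObjectAlloc event when sampling is enabled. Plain
// fast-path TLAB allocations return early; only non-TLAB allocations and
// TLAB refill/sample slow paths are handed to the heap sampler.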
void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
    // Sample if it's a non-TLAB allocation, or a TLAB allocation that either refills the TLAB
    // or expands it due to taking a sampler induced slow path.
    return;
  }

  // If we want to be sampling, protect the allocated object with a Handle
  // before doing the callback. The callback is done in the destructor of
  // the JvmtiSampledObjectAllocEventCollector.
  size_t bytes_since_last = 0;

  {
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;
    size_t size_in_bytes = _allocator._word_size * HeapWordSize;
    ThreadLocalAllocBuffer& tlab = _thread->tlab();

    if (!_allocated_outside_tlab) {
      bytes_since_last = tlab.bytes_since_last_sample_point();
    }

    _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
  }

  if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
    // Tell tlab to forget bytes_since_last if we passed it to the heap sampler.
    _thread->tlab().set_sample_end(bytes_since_last != 0);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

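// Sends JFR allocation events. Individual fast-path TLAB allocations are not
// traced; an event is sent only for allocations outside a TLAB or for the
// allocation that caused a TLAB refill.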
void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(obj()->klass(), mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(obj()->klass(), mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    Klass* klass = obj()->klass();
    size_t word_size = _allocator._word_size;
    if (klass != NULL && klass->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

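// Allocates directly from the shared heap and credits the allocated bytes to
// the thread's allocation counter. Reached when TLABs are disabled or when
// the TLAB path could not provide memory.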
HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
  if (mem == NULL) {
    return mem;
  }

  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);

  return mem;
}

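// Fast path: bump-pointer allocation in the thread's current TLAB, falling
// back to the slow path (which may retire and refill the TLAB) on failure.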
HeapWord* MemAllocator::allocate_inside_tlab(Allocation& allocation) const {
  assert(UseTLAB, "should use UseTLAB");

  // Try allocating from an existing TLAB.
  HeapWord* mem = _thread->tlab().allocate(_word_size);
  if (mem != NULL) {
    return mem;
  }

  // Try refilling the TLAB and allocating the object in it.
  return allocate_inside_tlab_slow(allocation);
}

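// TLAB slow path: either keep the current TLAB and let the caller allocate
// from the shared heap (when too much of the TLAB is still free to discard
// it), or retire the TLAB and allocate a fresh one to satisfy the request.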
HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = NULL;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    tlab.set_back_allocation_end();
    mem = tlab.allocate(_word_size);

    // We set the allocation end back to its real value to try this
    // allocation; remember to re-establish the sample point when done.
    allocation._tlab_end_reset_for_sample = true;

    if (mem != NULL) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  tlab.retire_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == NULL) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: " SIZE_FORMAT
           ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return NULL;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
         p2i(mem), min_tlab_size, new_tlab_size);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }

  tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size);
  return mem;
}

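// Tries a TLAB allocation first (when TLABs are enabled), then falls back to
// allocating directly from the shared heap.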
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    HeapWord* result = allocate_inside_tlab(allocation);
    if (result != NULL) {
      return result;
    }
  }

  return allocate_outside_tlab(allocation);
}

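// Top-level entry point. The Allocation RAII object throws OutOfMemoryError
// or performs the allocation notifications from its destructor, after obj
// has been set. Callers go through a concrete allocator subclass, e.g.
// (as in CollectedHeap::obj_allocate):
//
//   ObjAllocator allocator(klass, word_size, THREAD);
//   oop obj = allocator.allocate();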
oop MemAllocator::allocate() const {
  oop obj = NULL;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != NULL) {
      obj = initialize(mem);
    } else {
      // The unhandled oop detector will poison local variable obj,
      // so reset it to NULL if mem is NULL.
      obj = NULL;
    }
  }
  return obj;
}

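// Clears the klass gap and zeroes the object body past the header, so the
// object contents are fully initialized before the klass is published.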
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  oopDesc::set_klass_gap(mem, 0);
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

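// Completes the object header: installs the mark word and then publishes the
// klass pointer (see the release-store comment below).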
oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != NULL, "NULL object pointer");
  if (UseBiasedLocking) {
    oopDesc::set_mark(mem, _klass->prototype_header());
  } else {
    // May be bootstrapping
    oopDesc::set_mark(mem, markWord::prototype());
  }
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  oopDesc::release_set_klass(mem, _klass);
  return cast_to_oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_word_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, (int)_word_size);
  return finish(mem);
}