/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

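// Heap setup: reserve and commit the backing storage, carve out a single
// contiguous allocation space, precompute the constants used on the allocation
// path, and install monitoring support and the (no-op) Epsilon barrier set.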
jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

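// Serviceability: Epsilon exposes exactly one memory manager and one memory
// pool, both covering the entire heap.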
void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return the max allocatable TLAB size, and let the allocation path figure
  // out the actual allocation size. Note: the result is in bytes.
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}

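// Common allocation path: lock-free bump-the-pointer allocation in the committed
// space, falling back to expanding the committed region under Heap_lock when the
// fast path fails. Returns nullptr only when even the remaining reserved space
// cannot fit the allocation.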
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = nullptr;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != nullptr) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != nullptr) {
        break;
      }

      // Expand and loop back if space is available
      size_t space_left = max_capacity() - capacity();
      size_t want_space = MAX2(size, EpsilonMinHeapExpand);

      if (want_space < space_left) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size < space_left) {
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(space_left);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return nullptr;
      }

      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  size_t used = _space->used();

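  // Both updates below are throttled by their step sizes and are racy by design:
  // the CAS publishes the new watermark, so only the thread that wins the race
  // performs the (relatively expensive) reporting for a given step.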
  // Allocation successful, update counters
  if (verbose) {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  if (verbose) {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

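// TLAB allocation with optional elastic sizing: start from the per-thread
// ergonomic size, grow it geometrically when a request does not fit, and decay
// it back when the thread has not allocated for a while.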
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under the current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

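// Out-of-TLAB allocations go straight to the shared space. Epsilon never
// collects, so the GC overhead limit can never be exceeded.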
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */false);
}

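// Epsilon never collects the Java heap: only Metaspace-related causes get any
// handling here, everything else is acknowledged and ignored.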
void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

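// Occupancy reporting: percentages are relative to the reserved size; when the
// reserved size is not yet known, report that no reliable data is available.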
void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}