/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "utilities/ostream.hpp"

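// Heap initialization: reserve the maximum heap up front, commit the initial
// size, and precompute the constants used on the hot allocation path.
// A typical way to exercise this code is to run with:
//   -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xms<size> -Xmx<size>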
jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
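  // Only the initial size is committed here; the rest of the reservation is
  // committed on demand as allocations hit the current limit (see allocate_work).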
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

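// Serviceability: Epsilon exposes a single memory pool and a single memory
// manager, so the java.lang.management API sees the whole heap as one pool.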
void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual allocation size. Note: result should be in bytes.
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}

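// Common allocation path: lock-free bump-the-pointer allocation first, then,
// under the Heap_lock, one more attempt plus heap expansion by committing more
// of the reserved space. Returns nullptr only when even the remaining
// uncommitted space cannot satisfy the request.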
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
  assert(is_object_aligned(size), "Allocation size should be aligned: %zu", size);

  HeapWord* res = nullptr;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != nullptr) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != nullptr) {
        break;
      }

      // Expand and loop back if space is available
      size_t size_in_bytes = size * HeapWordSize;
      size_t uncommitted_space = max_capacity() - capacity();
      size_t unused_space = max_capacity() - used();
      size_t want_space = MAX2(size_in_bytes, EpsilonMinHeapExpand);
      assert(unused_space >= uncommitted_space,
             "Unused (%zu) >= uncommitted (%zu)",
             unused_space, uncommitted_space);

      if (want_space < uncommitted_space) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size_in_bytes < unused_space) {
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(uncommitted_space);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return nullptr;
      }

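      // Expansion succeeded: move the allocatable end up to the new committed
      // high watermark, then loop back and retry the allocation.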
      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  size_t used = _space->used();

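  // Counter updates and occupancy prints are throttled to once per configured
  // step of allocated bytes; the CAS ensures only one allocating thread
  // publishes the update for a given step.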
  // Allocation successful, update counters
  if (verbose) {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && AtomicAccess::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  if (verbose) {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && AtomicAccess::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

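// TLAB allocation. With EpsilonElasticTLAB, the handed-out TLAB size starts
// from the per-thread ergonomic size and grows by EpsilonTLABElasticity while
// the thread keeps allocating; with EpsilonElasticTLABDecay, the ergonomic
// size is retracted after EpsilonTLABDecayTime ms without allocations.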
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread has not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of
      // allocations and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: %zu", size);
  assert(min_size <= size,
         "Size honors min size: %zu <= %zu", min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: %zu <= %zu", size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: %zu <= %zu", size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: %zuK, Min: %zuK, "
                  "Max: %zuK, Ergo: %zuK) -> %zuK",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

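// Out-of-TLAB ("slow path") allocations go straight to the shared space.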
HeapWord* EpsilonHeap::mem_allocate(size_t size) {
  return allocate_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */false);
}

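// Explicit GC requests (System.gc(), metadata thresholds, etc.) arrive here.
// Epsilon never collects the Java heap, but it still resizes Metaspace for the
// metadata-triggered causes so the VM does not immediately re-enter the safepoint.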
void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_heap_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  StreamIndentor si(st, 1);

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");

    StreamIndentor si(st, 1);
    _space->print_on(st, "");
  }
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: %zu%s reserved, %zu%s (%.2f%%) committed, "
                 "%zu%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved = stats.reserved();
  size_t committed = stats.committed();
  size_t used = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: %zu%s reserved, %zu%s (%.2f%%) committed, "
                            "%zu%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}