/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _is_heap_almost_full(false),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
  GCLocker::initialize();
}

void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The young-gen pools do not support
  // low-memory detection, because eden and the survivor spaces are expected
  // to fill up regularly between collections.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

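  // The full-GC memory manager covers all pools, since a full GC affects the
  // entire heap.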
  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

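// CDS support: space for archived heap objects is allocated directly in the
// old generation.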
HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

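// Serial GC has no notion of pinned regions; object pinning is implemented by
// blocking GC entirely via the GCLocker for as long as any object is pinned.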
void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

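  // The young generation is placed at the low end of the reserved heap and the
  // old generation above it; is_in_young() relies on this address ordering.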
  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, SpaceAlignment);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, SpaceAlignment);

  _rem_set = new CardTableRS(_reserved);
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=%zu, alignment=%zu", total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

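// Closure used by ScavengableNMethods: an object is "scavengable" if it lives
// in the young generation; nmethods with oops into the young gen must be
// processed during young collections.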
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  SerialFullGC::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

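// Try to satisfy an allocation by expanding the generations: first the young
// gen, then (for non-TLAB requests) the old gen. Requires the Heap_lock.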
HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  assert(Heap_lock->is_locked(), "precondition");

  HeapWord* result = _young_gen->expand_and_allocate(size);

  if (result == nullptr && !is_tlab) {
    result = _old_gen->expand_and_allocate(size);
  }

  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

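// Fast path: attempt a lock-free (CAS) allocation in the young gen, falling
// back to the old gen for over-sized or emergency non-TLAB requests, without
// expanding the heap or triggering a GC.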
HeapWord* SerialHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
  HeapWord* result = _young_gen->par_allocate(size);
  if (result != nullptr) {
    return result;
  }
  // Try old-gen allocation for non-TLAB.
  if (!is_tlab) {
    // If it's too large for young-gen or heap is too full.
    if (size > heap_word_size(_young_gen->capacity_before_gc()) || _is_heap_almost_full) {
      result = _old_gen->par_allocate(size);
      if (result != nullptr) {
        return result;
      }
    }
  }

  return nullptr;
}

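// Slow path: loop between lock-free allocation attempts and GC operations
// executed by the VM thread until the allocation succeeds or the GC cannot
// free enough space.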
HeapWord* SerialHeap::mem_allocate_work(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;

  for (uint try_count = 1; /* break */; try_count++) {
    result = mem_allocate_cas_noexpand(size, is_tlab);
    if (result != nullptr) {
      break;
    }
    uint gc_count_before; // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);

      // Re-try after acquiring the lock, because a GC might have occurred
      // while waiting for this lock.
      result = mem_allocate_cas_noexpand(size, is_tlab);
      if (result != nullptr) {
        break;
      }

      if (!is_init_completed()) {
        // Can't do GC; try heap expansion to satisfy the request.
        result = expand_heap_and_allocate(size, is_tlab);
        if (result != nullptr) {
          return result;
        }
      }

      gc_count_before = total_collections();
    }

    VM_SerialCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.gc_succeeded()) {
      result = op.result();
      break;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                            " size=%zu %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  assert(result == nullptr || is_in_reserved(result), "postcondition");
  return result;
}

HeapWord* SerialHeap::mem_allocate(size_t size) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

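// A young GC can only proceed if to-space is empty and the old gen believes it
// can absorb the promotion that the collection might require.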
bool SerialHeap::is_young_gc_safe() const {
  if (!_young_gen->to()->is_empty()) {
    return false;
  }
  return _old_gen->promotion_attempt_is_safe(_young_gen->used());
}

bool SerialHeap::do_young_collection(bool clear_soft_refs) {
  if (!is_young_gc_safe()) {
    return false;
  }
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(_young_gen->gc_tracer());
  GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_young_gen->counters());
  TraceMemoryManagerStats tmms(_young_gen->gc_manager(), gc_cause(), "end of minor GC");
  print_before_gc();
  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  increment_total_collections(false);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }
  gc_prologue();
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  save_marks();

  bool result = _young_gen->collect(clear_soft_refs);

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Only update stats after a successful young GC.
  if (result) {
    _old_gen->update_promote_stats();
    _young_gen->resize_after_young_gc();
  }

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  gc_epilogue(false);

  print_after_gc();

  return result;
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  bs_nm->disarm(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  // If the young gen can handle this allocation, attempt a young GC first.
  bool should_run_young_gc = is_tlab || size <= _young_gen->eden()->capacity();
  collect_at_safepoint(!should_run_young_gc);

  // Just finished a GC, try to satisfy this allocation, using expansion if needed.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
    const bool clear_all_soft_refs = true;
    do_full_collection(clear_all_soft_refs);
  }

  // The previous full GC may have shrunk the heap, so expand it again if needed.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

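// Iterate over all objects in 'space' starting at *from, re-reading top()
// because applying the closure may evacuate further objects into this space.
// On return, *from points at the space's current top.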
template <typename OopClosureType>
static void oop_iterate_from(OopClosureType* blk, ContiguousSpace* space, HeapWord** from) {
  assert(*from != nullptr, "precondition");
  HeapWord* t;
  HeapWord* p = *from;

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = space->top();
    while (p < t) {
      Prefetch::write(p, interval);
      p += cast_to_oop(p)->oop_iterate_size(blk);
    }
  } while (t < space->top());

  *from = space->top();
}

void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
                                     OldGenScanClosure* old_cl) {
  ContiguousSpace* to_space = young_gen()->to();
  do {
    oop_iterate_from(young_cl, to_space, &_young_gen_saved_top);
    oop_iterate_from(old_cl, old_gen()->space(), &_old_gen_saved_top);
    // Recheck to-space only: oop_iterate_from leaves no unscanned objects
    // behind, but scanning the old gen may have evacuated more objects into
    // to-space.
  } while (_young_gen_saved_top != to_space->top());
  guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

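// Perform a collection at a safepoint: a young GC when !full, upgrading to a
// full GC if the young GC cannot be run safely or does not succeed.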
void SerialHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);

  if (!full) {
    bool success = do_young_collection(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC did not succeed.
  }
  do_full_collection(clear_soft_refs);
}

// public collection interfaces
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before = total_collections();
    full_gc_count_before = total_full_collections();
  }

  bool should_run_young_gc = (cause == GCCause::_wb_young_gc)
                             DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  VM_SerialGCCollect op(!should_run_young_gc,
                        gc_count_before,
                        full_gc_count_before,
                        cause);
  VMThread::execute(&op);
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::FULL);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
  GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_old_gen->counters());
  TraceMemoryManagerStats tmms(_old_gen->gc_manager(), gc_cause(), "end of major GC");
  const PreGenGCValues pre_gc_values = get_pre_gc_values();
  print_before_gc();

  increment_total_collections(true);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }

  gc_prologue();
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
  CodeCache::on_gc_marking_cycle_start();

  STWGCTimer* gc_timer = SerialFullGC::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = SerialFullGC::gc_tracer();
  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

  pre_full_gc_dump(gc_timer);

  SerialFullGC::invoke_at_safepoint(clear_all_soft_refs);

  post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Adjust generation sizes.
  _old_gen->compute_new_size();
  _young_gen->resize_after_full_gc();

  _old_gen->update_promote_stats();

  // Resize the metaspace capacity after full collections
  MetaspaceGC::compute_new_size();

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  // Tell the epilogue code that a full GC has completed, regardless of the
  // initial value of the "complete" flag.
  gc_epilogue(true);

  print_after_gc();

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

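// A stack chunk (Loom continuation) requires GC barriers once it is outside
// the young generation.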
bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity() const {
  // Only the young gen supports TLAB allocation.
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used() const {
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc() const {
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false); // no need to retire TLABs
}

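// Record the current tops of to-space and the old-gen space; objects that are
// evacuated or promoted during a young GC are appended above these marks and
// later processed by scan_evacuated_objs().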
void SerialHeap::save_marks() {
  _young_gen_saved_top = _young_gen->to()->top();
  _old_gen_saved_top = _old_gen->space()->top();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_heap_on(outputStream* st) const {
  assert(_young_gen != nullptr, "precondition");
  assert(_old_gen != nullptr, "precondition");

  _young_gen->print_on(st);
  _old_gen->print_on(st);
}

void SerialHeap::print_gc_on(outputStream* st) const {
  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
  // Does nothing
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue() {
  // Fill TLABs and such
  ensure_parsability(true); // retire TLABs

  _old_gen->gc_prologue();
}

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue();
  _old_gen->gc_epilogue();

  if (_is_heap_almost_full) {
    // Reset the emergency state if eden is empty after a young/full gc
    if (_young_gen->eden()->is_empty()) {
      _is_heap_almost_full = false;
    }
  } else {
    if (full && !_young_gen->eden()->is_empty()) {
      // Eden should usually be empty after a full GC, so the heap is probably
      // too full; enter the emergency state.
      _is_heap_almost_full = true;
    }
  }

  MetaspaceCounters::update_performance_counters();
}