/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

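// Applied to objects whose promotion failed and that therefore stay in young-gen:
// scans their fields and tries to scavenge any referenced young-gen objects so the
// object graph is still traversed completely.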
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

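// Scans root locations that live outside the heap (e.g. thread stacks and VM-internal
// oop storages) and scavenges any young-gen objects they reference.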
class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

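// Visits ClassLoaderData whose oops have been modified since the last scavenge,
// scavenges the oops they hold, and re-marks a CLD as modified if it still points
// into young-gen afterwards.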
class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
  public:
    // Records whether this CLD contains oops pointing into young-gen after scavenging.
    bool _has_oops_into_young_gen;

    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
      _has_oops_into_young_gen(false) {}

    void do_oop(oop* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        if (!_has_oops_into_young_gen && is_in_young_gen(new_obj)) {
          _has_oops_into_young_gen = true;
        }
      });
    }

    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  DefNewGeneration* _g;
public:
  CLDScanClosure(DefNewGeneration* g) : _g(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied, we know that there are no
    // references into the young gen and we can skip it.
    if (!cld->has_modified_oops()) {
      return;
    }

    CLDOopClosure oop_closure{_g};

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(&oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

    if (oop_closure._has_oops_into_young_gen) {
      cld->record_modified_oops();
    }
  }
};

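// An object is considered live if it resides outside young-gen, or if it has already
// been forwarded (i.e. evacuated) during this scavenge.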
class IsAliveClosure: public BoolObjectClosure {
  HeapWord* _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

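// Updates a weak root that refers to an evacuated young-gen object so that it points
// to the object's new copy.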
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

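// Used by reference processing to keep referents alive: evacuates an unforwarded
// young-gen referent and dirties the card when an old-gen location ends up pointing
// into young-gen.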
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord*         _young_gen_end;
  CardTableRS*      _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

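// Transitively scans objects evacuated so far, evacuating everything reachable from
// them, until no unscanned evacuated objects remain.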
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap*          _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure*   _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _promo_failure_drain_in_progress(false),
    _string_dedup_requests()
{
  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  init_spaces();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
                                         min_size, max_size, _virtual_space.committed_size());
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

void DefNewGeneration::init_spaces() {
  // Using layout: from, to, eden, so only from can be non-empty.
  assert(eden()->is_empty(), "precondition");
  assert(to()->is_empty(), "precondition");

  if (!from()->is_empty()) {
    assert((char*) from()->bottom() == _virtual_space.low(), "inv");
  }

  // Compute sizes
  size_t size = _virtual_space.committed_size();
  size_t survivor_size = compute_survivor_size(size, SpaceAlignment);
  assert(survivor_size >= from()->used(), "inv");
  assert(size > 2 * survivor_size, "inv");
  size_t eden_size = size - (2 * survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  // layout: from, to, eden
  char* from_start = _virtual_space.low();
  char* to_start   = from_start + survivor_size;
  char* eden_start = to_start   + survivor_size;
  char* eden_end   = eden_start + eden_size;

  assert(eden_end == _virtual_space.high(), "just checking");
  assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");
  assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(eden_end, SpaceAlignment),   "checking alignment");

  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)eden_start);
  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);

  // Reset the spaces for their new regions.
  from()->initialize(fromMR, from()->is_empty());
  to()->initialize(toMR, true);
  eden()->initialize(edenMR, true);

  post_resize();
}

void DefNewGeneration::post_resize() {
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap::heap()->rem_set()->resize_covered_region(cmr);
}

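// Exchange from-space and to-space (and their performance counters), so that the
// space just populated by the scavenge becomes the new from-space.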
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  assert(bytes != 0, "precondition");
  assert(is_aligned(bytes, SpaceAlignment), "precondition");

  bool success = _virtual_space.expand_by(bytes);
  if (!success) {
    log_info(gc)("Failed to expand young-gen by %zu bytes", bytes);
  }

  return success;
}

void DefNewGeneration::expand_eden_by(size_t delta_bytes) {
  if (!expand(delta_bytes)) {
    return;
  }

  MemRegion eden_mr{eden()->bottom(), (HeapWord*)_virtual_space.high()};
  eden()->initialize(eden_mr, eden()->is_empty());

  post_resize();
}

size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check an overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check an overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check an overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

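// Computes the desired committed size of the young generation from the old-gen
// capacity (NewRatio), the non-daemon thread count (NewSizeThreadIncrease) and the
// live data currently held in from-space, clamped to [NewSize, reserved size].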
size_t DefNewGeneration::calculate_desired_young_gen_bytes() const {
  size_t old_size = SerialHeap::heap()->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  if (!from()->is_empty()) {
    // Minimum constraint to hold all live objs inside from-space.
    size_t min_survivor_size = align_up(from()->used(), alignment);

    // SurvivorRatio  := eden_size / survivor_size
    // young-gen-size  = eden_size + 2 * survivor_size
    //                 = SurvivorRatio * survivor_size + 2 * survivor_size
    //                 = (SurvivorRatio + 2) * survivor_size
    size_t min_young_gen_size = min_survivor_size * (SurvivorRatio + 2);

    desired_new_size = MAX2(min_young_gen_size, desired_new_size);
  }
  assert(is_aligned(desired_new_size, alignment), "postcondition");

  return desired_new_size;
}

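// Resizes the committed part of the young generation towards the desired size.
// Eden and to-space must be empty, so only the contents of from-space constrain
// the new layout.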
void DefNewGeneration::resize_inner() {
  assert(eden()->is_empty(), "precondition");
  assert(to()->is_empty(), "precondition");

  size_t current_young_gen_size_bytes = _virtual_space.committed_size();
  size_t desired_young_gen_size_bytes = calculate_desired_young_gen_bytes();
  if (current_young_gen_size_bytes == desired_young_gen_size_bytes) {
    return;
  }

  // Commit/uncommit
  if (desired_young_gen_size_bytes > current_young_gen_size_bytes) {
    size_t delta_bytes = desired_young_gen_size_bytes - current_young_gen_size_bytes;
    if (!expand(delta_bytes)) {
      return;
    }
  } else {
    size_t delta_bytes = current_young_gen_size_bytes - desired_young_gen_size_bytes;
    _virtual_space.shrink_by(delta_bytes);
  }

  assert(desired_young_gen_size_bytes == _virtual_space.committed_size(), "inv");

  init_spaces();

  log_debug(gc, ergo, heap)("New generation size %zuK->%zuK [eden=%zuK,survivor=%zuK]",
                            current_young_gen_size_bytes/K, _virtual_space.committed_size()/K,
                            eden()->capacity()/K, from()->capacity()/K);
}

void DefNewGeneration::resize_after_young_gc() {
  // Called only after a successful young-gc.
  assert(eden()->is_empty(), "precondition");
  assert(to()->is_empty(), "precondition");

  if ((char*)to()->bottom() == _virtual_space.low()) {
    // layout: to, from, eden; can't resize.
    return;
  }

  assert((char*)from()->bottom() == _virtual_space.low(), "inv");
  resize_inner();
}

void DefNewGeneration::resize_after_full_gc() {
  if (eden()->is_empty() && from()->is_empty() && to()->is_empty()) {
    resize_inner();
    return;
  }

  // Usually the young-gen is empty after full-gc; this is the extreme case
  // where it is not. Expand young-gen to its max size.
  if (_virtual_space.uncommitted_size() == 0) {
    // Already at its max size.
    return;
  }

  // Keep from/to and expand eden.
  expand_eden_by(_virtual_space.uncommitted_size());
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer); // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity(); // to() is only used during scavenge
}

size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used(); // to() is only used during scavenge
}

size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free(); // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  const size_t min_survivor_bytes = SpaceAlignment;
  return reserved_bytes - min_survivor_bytes;
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()  ->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// If "p" is in the space, returns the address of the start of the
// "block" that contains "p". We say "block" instead of "object" since
// some heaps may not pack objects densely; a chunk may either be an
// object or a non-object. If "p" is not in the space, return null.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

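// Performs a young collection (scavenge): evacuates live young-gen objects to
// to-space or old-gen, processes discovered references and weak roots, and returns
// whether the collection completed without promotion failure.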
bool DefNewGeneration::collect(bool clear_all_soft_refs) {
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure   old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    RootScanClosure oop_closure{this};
    CLDScanClosure cld_closure{this};

    NMethodToOopClosure nmethod_closure(&oop_closure,
                                        NMethodToOopClosure::FixRelocations);

    // Starting tracing from roots, there are 4 kinds of roots in young-gc.
    //
    // 1. Old-to-young pointers; process them before the other kinds of roots.
    _old_gen->scan_old_to_young_refs();

    // 2. CLDs; visit all (strong+weak) CLDs with the same closure, because we
    // don't perform class unloading during young-gc.
    ClassLoaderDataGraph::cld_do(&cld_closure);

    // 3. Thread stack frames and nmethods.
    // Only nmethods that contain pointers into young-gen need to be processed
    // during young-gc, and they are tracked in ScavengableNMethods.
    Threads::oops_do(&oop_closure, nullptr);
    ScavengableNMethods::nmethods_do(&nmethod_closure);

    // 4. VM internal roots.
    OopStorageSet::strong_oops_do(&oop_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, nullptr, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        // To restore the klass-bits in the header.
        // Needed for object iteration to work properly.
        obj->set_mark(obj->forwardee()->prototype_mark());
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}

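// Called when an object cannot be copied to old-gen: the object stays in place,
// is forwarded to itself, and is queued so that its fields are still scanned.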
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %zu", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

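// Copies a live young-gen object into to-space, or promotes it into old-gen when it
// has reached the tenuring threshold or to-space allocation fails; installs the
// forwarding pointer in the old copy's mark word.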
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->allocate_for_promotion(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    new_obj_is_tenured = true;
  }

  // Prefetch beyond obj
  const intx interval = PrefetchCopyIntervalInBytes;
  Prefetch::write(obj, interval);

  // Copy obj
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (!new_obj_is_tenured) {
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(&cl);
  }
}

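// Offers the unused tail of to-space as scratch memory for full-gc. After a promotion
// failure nothing is contributed, since to-space may still hold evacuated objects.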
void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area();
  }
}

void DefNewGeneration::gc_epilogue() {
  assert(!GCLocker::is_active(), "We should not be executing here");
  // update the generation and space performance counters
  update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_capacity(_virtual_space.committed_size());
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  st->print("%-10s", name());

  st->print(" total %zuK, used %zuK ", capacity() / K, used() / K);
  _virtual_space.print_space_boundaries_on(st);

  StreamIndentor si(st, 1);
  eden()->print_on(st, "eden ");
  from()->print_on(st, "from ");
  to()->print_on(st, "to ");
}

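// Expands eden (committing more of the reserved space) if needed to satisfy an
// allocation of word_size, then attempts the allocation. Caller must hold the Heap_lock.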
HeapWord* DefNewGeneration::expand_and_allocate(size_t word_size) {
  assert(Heap_lock->is_locked(), "precondition");

  size_t eden_free_bytes = eden()->free();
  size_t requested_bytes = word_size * HeapWordSize;
  if (eden_free_bytes < requested_bytes) {
    size_t expand_bytes = requested_bytes - eden_free_bytes;
    expand_eden_by(align_up(expand_bytes, SpaceAlignment));
  }

  HeapWord* result = eden()->allocate(word_size);
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}