1 /*
2 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "gc/serial/cardTableRS.hpp"
27 #include "gc/serial/serialGcRefProcProxyTask.hpp"
28 #include "gc/serial/serialHeap.inline.hpp"
29 #include "gc/serial/serialStringDedup.inline.hpp"
30 #include "gc/serial/tenuredGeneration.hpp"
31 #include "gc/shared/adaptiveSizePolicy.hpp"
32 #include "gc/shared/ageTable.inline.hpp"
33 #include "gc/shared/collectorCounters.hpp"
34 #include "gc/shared/continuationGCSupport.inline.hpp"
35 #include "gc/shared/gcArguments.hpp"
36 #include "gc/shared/gcHeapSummary.hpp"
37 #include "gc/shared/gcLocker.hpp"
38 #include "gc/shared/gcPolicyCounters.hpp"
39 #include "gc/shared/gcTimer.hpp"
40 #include "gc/shared/gcTrace.hpp"
41 #include "gc/shared/gcTraceTime.inline.hpp"
42 #include "gc/shared/hSpaceCounters.hpp"
43 #include "gc/shared/oopStorageSet.inline.hpp"
44 #include "gc/shared/referencePolicy.hpp"
45 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
46 #include "gc/shared/scavengableNMethods.hpp"
47 #include "gc/shared/space.hpp"
48 #include "gc/shared/spaceDecorator.hpp"
49 #include "gc/shared/weakProcessor.hpp"
50 #include "logging/log.hpp"
51 #include "memory/iterator.inline.hpp"
52 #include "memory/reservedSpace.hpp"
53 #include "memory/resourceArea.hpp"
54 #include "oops/instanceRefKlass.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "runtime/java.hpp"
57 #include "runtime/javaThread.hpp"
58 #include "runtime/prefetch.inline.hpp"
59 #include "runtime/threads.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/copy.hpp"
62 #include "utilities/globalDefinitions.hpp"
63 #include "utilities/stack.inline.hpp"
64
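// Scans the fields of objects that failed promotion. Such objects stay in
// eden/from-space (forwarded to self), so the scanned field addresses are in
// young-gen; any young referents are scavenged as usual.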
65 class PromoteFailureClosure : public InHeapScanClosure {
66 template <typename T>
67 void do_oop_work(T* p) {
68 assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
69 assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");
70
71 try_scavenge(p, [] (auto) {});
72 }
73 public:
74 PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
75
76 void do_oop(oop* p) { do_oop_work(p); }
77 void do_oop(narrowOop* p) { do_oop_work(p); }
78 };
79
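// Scans root locations outside the heap, e.g. thread stacks and VM-internal
// oop storages, scavenging any young objects they refer to.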
80 class RootScanClosure : public OffHeapScanClosure {
81 template <typename T>
82 void do_oop_work(T* p) {
83 assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
84
85 try_scavenge(p, [] (auto) {});
86 }
87 public:
88 RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}
89
90 void do_oop(oop* p) { do_oop_work(p); }
91 void do_oop(narrowOop* p) { do_oop_work(p); }
92 };
93
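// Scans the oops held by ClassLoaderData. Only dirty CLDs are visited; a CLD
// is re-marked dirty if it still has oops pointing into young-gen after
// scavenging, so that the next young-gc will visit it again.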
94 class CLDScanClosure: public CLDClosure {
95
96 class CLDOopClosure : public OffHeapScanClosure {
97 public:
98 // Records whether this CLD contains oops pointing into young-gen after scavenging.
99 bool _has_oops_into_young_gen;
100
101 CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
102 _has_oops_into_young_gen(false) {}
103
104 void do_oop(oop* p) {
105 assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
106
107 try_scavenge(p, [&] (oop new_obj) {
108 if (!_has_oops_into_young_gen && is_in_young_gen(new_obj)) {
109 _has_oops_into_young_gen = true;
110 }
111 });
112 }
113
114 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
115 };
116
117 DefNewGeneration* _g;
118 public:
119 CLDScanClosure(DefNewGeneration* g) : _g(g) {}
120
121 void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
124 if (!cld->has_modified_oops()) {
125 return;
126 }
127
128 CLDOopClosure oop_closure{_g};
129
130 // Clean the cld since we're going to scavenge all the metadata.
131 cld->oops_do(&oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
132
133 if (oop_closure._has_oops_into_young_gen) {
134 cld->record_modified_oops();
135 }
136 }
137 };
138
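// During young-gc, an object outside young-gen is trivially alive; a young
// object is alive iff it has already been forwarded (copied).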
139 class IsAliveClosure: public BoolObjectClosure {
140 HeapWord* _young_gen_end;
141 public:
142 IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}
143
144 bool do_object_b(oop p) {
145 return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
146 }
147 };
148
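// Updates weak roots after evacuation: a live young referent has been
// forwarded by now, so point the root at its forwardee.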
149 class AdjustWeakRootClosure: public OffHeapScanClosure {
150 template <class T>
151 void do_oop_work(T* p) {
152 DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
153 assert(!heap->is_in_reserved(p), "outside the heap");
154
155 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
156 if (is_in_young_gen(obj)) {
157 assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
158 assert(obj->is_forwarded(), "forwarded before weak-root-processing");
159 oop new_obj = obj->forwardee();
160 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
161 }
162 }
163 public:
164 AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}
165
166 void do_oop(oop* p) { do_oop_work(p); }
167 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
168 };
169
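// Used during reference processing to keep referents alive: copies a young
// referent to a survivor space (or promotes it) if it has not been copied
// yet, and dirties the card if a location outside young-gen now points into
// young-gen.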
170 class KeepAliveClosure: public OopClosure {
171 DefNewGeneration* _young_gen;
172 HeapWord* _young_gen_end;
173 CardTableRS* _rs;
174
175 bool is_in_young_gen(void* p) const {
176 return p < _young_gen_end;
177 }
178
179 template <class T>
180 void do_oop_work(T* p) {
181 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
182
183 if (is_in_young_gen(obj)) {
184 oop new_obj = obj->is_forwarded() ? obj->forwardee()
185 : _young_gen->copy_to_survivor_space(obj);
186 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
187
188 if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
189 _rs->inline_write_ref_field_gc(p);
190 }
191 }
192 }
193 public:
194 KeepAliveClosure(DefNewGeneration* g) :
195 _young_gen(g),
196 _young_gen_end(g->reserved().end()),
197 _rs(SerialHeap::heap()->rem_set()) {}
198
199 void do_oop(oop* p) { do_oop_work(p); }
200 void do_oop(narrowOop* p) { do_oop_work(p); }
201 };
202
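// Transitively scans evacuated objects ("followers") until no unscanned
// copies remain.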
203 class FastEvacuateFollowersClosure: public VoidClosure {
204 SerialHeap* _heap;
205 YoungGenScanClosure* _young_cl;
206 OldGenScanClosure* _old_cl;
207 public:
208 FastEvacuateFollowersClosure(SerialHeap* heap,
209 YoungGenScanClosure* young_cl,
210 OldGenScanClosure* old_cl) :
211 _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
212 {}
213
214 void do_void() {
215 _heap->scan_evacuated_objs(_young_cl, _old_cl);
216 }
217 };
218
219 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
220 size_t initial_size,
221 size_t min_size,
222 size_t max_size,
223 const char* policy)
224 : Generation(rs, initial_size),
225 _promotion_failed(false),
226 _promo_failure_drain_in_progress(false),
227 _string_dedup_requests()
228 {
229 _eden_space = new ContiguousSpace();
230 _from_space = new ContiguousSpace();
231 _to_space = new ContiguousSpace();
232
233 init_spaces();
234
235 // Compute the maximum eden and survivor space sizes. These sizes
236 // are computed assuming the entire reserved space is committed.
237 // These values are exported as performance counters.
238 uintx size = _virtual_space.reserved_size();
239 _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
240
241 // Eden might grow to be almost as large as the entire young generation.
242 // We approximate this as the entire virtual space.
243 _max_eden_size = size;
244
245 // allocate the performance counters
246
247 // Generation counters -- generation 0, 3 subspaces
248 _gen_counters = new GenerationCounters("new", 0, 3,
249 min_size, max_size, _virtual_space.committed_size());
250 _gc_counters = new CollectorCounters(policy, 0);
251
252 _eden_counters = new HSpaceCounters(_gen_counters->name_space(), "eden", 0,
253 _max_eden_size, _eden_space->capacity());
254 _from_counters = new HSpaceCounters(_gen_counters->name_space(), "s0", 1,
255 _max_survivor_size, _from_space->capacity());
256 _to_counters = new HSpaceCounters(_gen_counters->name_space(), "s1", 2,
257 _max_survivor_size, _to_space->capacity());
258
259 update_counters();
260 _old_gen = nullptr;
261 _tenuring_threshold = MaxTenuringThreshold;
262
263 _ref_processor = nullptr;
264
265 _gc_timer = new STWGCTimer();
266
267 _gc_tracer = new DefNewTracer();
268 }
269
270 void DefNewGeneration::init_spaces() {
271 // Using layout: from, to, eden, so only from can be non-empty.
272 assert(eden()->is_empty(), "precondition");
273 assert(to()->is_empty(), "precondition");
274
275 if (!from()->is_empty()) {
276 assert((char*) from()->bottom() == _virtual_space.low(), "inv");
277 }
278
279 // Compute sizes
280 size_t size = _virtual_space.committed_size();
281 size_t survivor_size = compute_survivor_size(size, SpaceAlignment);
282 assert(survivor_size >= from()->used(), "inv");
283 assert(size > 2 * survivor_size, "inv");
284 size_t eden_size = size - (2 * survivor_size);
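  // E.g. with 64M committed and an 8M survivor_size, eden_size = 48M.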
285 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
286
287 // layout: from, to, eden
288 char* from_start = _virtual_space.low();
289 char* to_start = from_start + survivor_size;
290 char* eden_start = to_start + survivor_size;
291 char* eden_end = eden_start + eden_size;
292
293 assert(eden_end == _virtual_space.high(), "just checking");
294 assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
295 assert(is_aligned(to_start, SpaceAlignment), "checking alignment");
296 assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
297 assert(is_aligned(eden_end, SpaceAlignment), "checking alignment");
298
299 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
300 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)eden_start);
301 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
302
303 // Reset the spaces for their new regions.
304 from()->initialize(fromMR, from()->is_empty());
305 to()->initialize(toMR, true);
306 eden()->initialize(edenMR, true);
307
308 post_resize();
309 }
310
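// Inform the remembered set (card table) of the new extent of young-gen.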
311 void DefNewGeneration::post_resize() {
312 MemRegion cmr((HeapWord*)_virtual_space.low(),
313 (HeapWord*)_virtual_space.high());
314 SerialHeap::heap()->rem_set()->resize_covered_region(cmr);
315 }
316
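// Exchange the roles of from-space and to-space after a successful scavenge:
// the survivors now sit in what was to-space, which becomes the new
// from-space.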
317 void DefNewGeneration::swap_spaces() {
318 ContiguousSpace* s = from();
319 _from_space = to();
320 _to_space = s;
321
322 if (UsePerfData) {
323 HSpaceCounters* c = _from_counters;
324 _from_counters = _to_counters;
325 _to_counters = c;
326 }
327 }
328
329 bool DefNewGeneration::expand(size_t bytes) {
330 assert(bytes != 0, "precondition");
331 assert(is_aligned(bytes, SpaceAlignment), "precondition");
332
333 bool success = _virtual_space.expand_by(bytes);
334 if (!success) {
335 log_info(gc)("Failed to expand young-gen by %zu bytes", bytes);
336 }
337
338 return success;
339 }
340
341 void DefNewGeneration::expand_eden_by(size_t delta_bytes) {
342 if (!expand(delta_bytes)) {
343 return;
344 }
345
346 MemRegion eden_mr{eden()->bottom(), (HeapWord*)_virtual_space.high()};
347 eden()->initialize(eden_mr, eden()->is_empty());
348
349 post_resize();
350 }
351
352 size_t DefNewGeneration::calculate_desired_young_gen_bytes() const {
353 size_t old_size = SerialHeap::heap()->old_gen()->capacity();
354 size_t new_size_before = _virtual_space.committed_size();
355 size_t min_new_size = NewSize;
356 size_t max_new_size = reserved().byte_size();
357 assert(min_new_size <= new_size_before &&
358 new_size_before <= max_new_size,
359 "just checking");
360 // All space sizes must be multiples of Generation::GenGrain.
361 size_t alignment = Generation::GenGrain;
362
363 size_t new_size_candidate = old_size / NewRatio;
364 size_t desired_new_size = align_up(new_size_candidate, alignment);
365
366 // Adjust new generation size
367 desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
368 if (!from()->is_empty()) {
    // Minimum constraint to hold all live objs inside from-space.
370 size_t min_survivor_size = align_up(from()->used(), alignment);
371
372 // SurvivorRatio := eden_size / survivor_size
373 // young-gen-size = eden_size + 2 * survivor_size
374 // = SurvivorRatio * survivor_size + 2 * survivor_size
375 // = (SurvivorRatio + 2) * survivor_size
376 size_t min_young_gen_size = min_survivor_size * (SurvivorRatio + 2);
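    // E.g. with the default SurvivorRatio=8 and 1M of live data in from-space,
    // min_young_gen_size = (8 + 2) * 1M = 10M.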
377
378 desired_new_size = MAX2(min_young_gen_size, desired_new_size);
379 }
380 assert(is_aligned(desired_new_size, alignment), "postcondition");
381
382 return desired_new_size;
383 }
384
385 void DefNewGeneration::resize_inner() {
386 assert(eden()->is_empty(), "precondition");
387 assert(to()->is_empty(), "precondition");
388
389 size_t current_young_gen_size_bytes = _virtual_space.committed_size();
390 size_t desired_young_gen_size_bytes = calculate_desired_young_gen_bytes();
391 if (current_young_gen_size_bytes == desired_young_gen_size_bytes) {
392 return;
393 }
394
395 // Commit/uncommit
396 if (desired_young_gen_size_bytes > current_young_gen_size_bytes) {
397 size_t delta_bytes = desired_young_gen_size_bytes - current_young_gen_size_bytes;
398 if (!expand(delta_bytes)) {
399 return;
400 }
401 } else {
402 size_t delta_bytes = current_young_gen_size_bytes - desired_young_gen_size_bytes;
403 _virtual_space.shrink_by(delta_bytes);
404 }
405
406 assert(desired_young_gen_size_bytes == _virtual_space.committed_size(), "inv");
407
408 init_spaces();
409
410 log_debug(gc, ergo, heap)("New generation size %zuK->%zuK [eden=%zuK,survivor=%zuK]",
411 current_young_gen_size_bytes/K, _virtual_space.committed_size()/K,
412 eden()->capacity()/K, from()->capacity()/K);
413 }
414
415 void DefNewGeneration::resize_after_young_gc() {
416 // Called only after successful young-gc.
417 assert(eden()->is_empty(), "precondition");
418 assert(to()->is_empty(), "precondition");
419
420 if ((char*)to()->bottom() == _virtual_space.low()) {
421 // layout: to, from, eden; can't resize.
422 return;
423 }
424
425 assert((char*)from()->bottom() == _virtual_space.low(), "inv");
426 resize_inner();
427 }
428
429 void DefNewGeneration::resize_after_full_gc() {
430 if (eden()->is_empty() && from()->is_empty() && to()->is_empty()) {
431 resize_inner();
432 return;
433 }
434
  // Young-gen is usually empty after a full-gc; that case is handled above.
  // Reaching here is the extreme case: live objs remain in young-gen, so
  // expand it to its max size.
437 if (_virtual_space.uncommitted_size() == 0) {
438 // Already at its max size.
439 return;
440 }
441
442 // Keep from/to and expand eden.
443 expand_eden_by(_virtual_space.uncommitted_size());
444 }
445
446 void DefNewGeneration::ref_processor_init() {
447 assert(_ref_processor == nullptr, "a reference processor already exists");
448 assert(!_reserved.is_empty(), "empty generation?");
449 _span_based_discoverer.set_span(_reserved);
450 _ref_processor = new ReferenceProcessor(&_span_based_discoverer); // a vanilla reference processor
451 }
452
453 size_t DefNewGeneration::capacity() const {
454 return eden()->capacity()
455 + from()->capacity(); // to() is only used during scavenge
456 }
457
458 size_t DefNewGeneration::used() const {
459 return eden()->used()
460 + from()->used(); // to() is only used during scavenge
461 }
462
463 size_t DefNewGeneration::free() const {
464 return eden()->free()
465 + from()->free(); // to() is only used during scavenge
466 }
467
468 size_t DefNewGeneration::max_capacity() const {
469 const size_t reserved_bytes = reserved().byte_size();
470 const size_t min_survivor_bytes = SpaceAlignment;
471 return reserved_bytes - min_survivor_bytes;
472 }
473
474 bool DefNewGeneration::is_in(const void* p) const {
475 return eden()->is_in(p)
476 || from()->is_in(p)
477 || to() ->is_in(p);
478 }
479
480 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
481 return eden()->free();
482 }
483
484 size_t DefNewGeneration::capacity_before_gc() const {
485 return eden()->capacity();
486 }
487
488 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
489 eden()->object_iterate(blk);
490 from()->object_iterate(blk);
491 }
492
493 // If "p" is in the space, returns the address of the start of the
494 // "block" that contains "p". We say "block" instead of "object" since
495 // some heaps may not pack objects densely; a chunk may either be an
496 // object or a non-object. If "p" is not in the space, return null.
497 // Very general, slow implementation.
498 static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
499 assert(MemRegion(cs->bottom(), cs->end()).contains(p),
500 "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
501 p2i(p), p2i(cs->bottom()), p2i(cs->end()));
502 if (p >= cs->top()) {
503 return cs->top();
504 } else {
505 HeapWord* last = cs->bottom();
506 HeapWord* cur = last;
507 while (cur <= p) {
508 last = cur;
509 cur += cast_to_oop(cur)->size();
510 }
511 assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
512 return last;
513 }
514 }
515
516 HeapWord* DefNewGeneration::block_start(const void* p) const {
517 if (eden()->is_in_reserved(p)) {
518 return block_start_const(eden(), p);
519 }
520 if (from()->is_in_reserved(p)) {
521 return block_start_const(from(), p);
522 }
523 assert(to()->is_in_reserved(p), "inv");
524 return block_start_const(to(), p);
525 }
526
527 void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent
  // (50% by default) of the real survivor space.
529 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
530 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
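  // E.g. with the default TargetSurvivorRatio=50, the desired survivor size
  // is half of to-space's capacity (both measured in words here).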
531
532 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
533
534 if (UsePerfData) {
535 GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
536 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
537 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
538 }
539
540 age_table()->print_age_table();
541 }
542
543 bool DefNewGeneration::collect(bool clear_all_soft_refs) {
544 SerialHeap* heap = SerialHeap::heap();
545
546 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
547 _gc_timer->register_gc_start();
548 _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
549 _ref_processor->start_discovery(clear_all_soft_refs);
550
551 _old_gen = heap->old_gen();
552
553 init_assuming_no_promotion_failure();
554
555 GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());
556
557 heap->trace_heap_before_gc(_gc_tracer);
558
559 // These can be shared for all code paths
560 IsAliveClosure is_alive(this);
561
562 age_table()->clear();
563
564 YoungGenScanClosure young_gen_cl(this);
565 OldGenScanClosure old_gen_cl(this);
566
567 FastEvacuateFollowersClosure evacuate_followers(heap,
568 &young_gen_cl,
569 &old_gen_cl);
570
571 {
572 RootScanClosure oop_closure{this};
573 CLDScanClosure cld_closure{this};
574
575 NMethodToOopClosure nmethod_closure(&oop_closure,
576 NMethodToOopClosure::FixRelocations);
577
    // Start tracing from roots; there are 4 kinds of roots in a young-gc.
    //
    // 1. Old-to-young pointers; these are processed before the other kinds
    // of roots.
582 _old_gen->scan_old_to_young_refs();
583
    // 2. CLDs; visit all (strong+weak) CLDs with the same closure, because we
    // don't perform class unloading during young-gc.
586 ClassLoaderDataGraph::cld_do(&cld_closure);
587
    // 3. Thread stack frames and nmethods.
    // Only nmethods that contain pointers into young-gen need to be processed
    // during young-gc; they are tracked in ScavengableNMethods.
591 Threads::oops_do(&oop_closure, nullptr);
592 ScavengableNMethods::nmethods_do(&nmethod_closure);
593
594 // 4. VM internal roots.
595 OopStorageSet::strong_oops_do(&oop_closure);
596 }
597
598 // "evacuate followers".
599 evacuate_followers.do_void();
600
601 {
602 // Reference processing
603 KeepAliveClosure keep_alive(this);
604 ReferenceProcessor* rp = ref_processor();
605 ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
606 SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
607 const ReferenceProcessorStats& stats = rp->process_discovered_references(task, nullptr, pt);
608 _gc_tracer->report_gc_reference_stats(stats);
609 _gc_tracer->report_tenuring_threshold(tenuring_threshold());
610 pt.print_all_references();
611 }
612
613 {
614 AdjustWeakRootClosure cl{this};
615 WeakProcessor::weak_oops_do(&is_alive, &cl);
616 }
617
618 _string_dedup_requests.flush();
619
620 if (!_promotion_failed) {
621 // Swap the survivor spaces.
622 eden()->clear(SpaceDecorator::Mangle);
623 from()->clear(SpaceDecorator::Mangle);
624 swap_spaces();
625
626 assert(to()->is_empty(), "to space should be empty now");
627
628 adjust_desired_tenuring_threshold();
629 } else {
630 assert(_promo_failure_scan_stack.is_empty(), "post condition");
631 _promo_failure_scan_stack.clear(true); // Clear cached segments.
632
633 remove_forwarding_pointers();
634 log_info(gc, promotion)("Promotion failed");
635
636 _gc_tracer->report_promotion_failed(_promotion_failed_info);
637
638 // Reset the PromotionFailureALot counters.
639 NOT_PRODUCT(heap->reset_promotion_should_fail();)
640 }
641
642 heap->trace_heap_after_gc(_gc_tracer);
643
644 _gc_timer->register_gc_end();
645
646 _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
647
648 return !_promotion_failed;
649 }
650
651 void DefNewGeneration::init_assuming_no_promotion_failure() {
652 _promotion_failed = false;
653 _promotion_failed_info.reset();
654 }
655
656 void DefNewGeneration::remove_forwarding_pointers() {
657 assert(_promotion_failed, "precondition");
658
659 // Will enter Full GC soon due to failed promotion. Must reset the mark word
660 // of objs in young-gen so that no objs are marked (forwarded) when Full GC
661 // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
662 struct ResetForwardedMarkWord : ObjectClosure {
663 void do_object(oop obj) override {
664 obj->reset_forwarded();
665 }
666 } cl;
667 eden()->object_iterate(&cl);
668 from()->object_iterate(&cl);
669 }
670
671 void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %zu", old->size());
673
674 _promotion_failed = true;
675 _promotion_failed_info.register_copy_failure(old->size());
676
677 ContinuationGCSupport::transform_stack_chunk(old);
678
679 // forward to self
680 old->forward_to_self();
681
682 _promo_failure_scan_stack.push(old);
683
684 if (!_promo_failure_drain_in_progress) {
685 // prevent recursion in copy_to_survivor_space()
686 _promo_failure_drain_in_progress = true;
687 drain_promo_failure_scan_stack();
688 _promo_failure_drain_in_progress = false;
689 }
690 }
691
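// Copies a live young object either into to-space (if its age is below the
// tenuring threshold) or into old-gen, then installs a forwarding pointer in
// the original's header. On promotion failure the object is forwarded to
// itself and the original oop is returned.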
692 oop DefNewGeneration::copy_to_survivor_space(oop old) {
693 assert(is_in_reserved(old) && !old->is_forwarded(),
694 "shouldn't be scavenging this oop");
695 size_t old_size = old->size();
696 size_t s = old->copy_size(old_size, old->mark());
697
698 oop obj = nullptr;
699
700 // Try allocating obj in to-space (unless too old)
701 if (old->age() < tenuring_threshold()) {
702 obj = cast_to_oop(to()->allocate(s));
703 }
704
705 bool new_obj_is_tenured = false;
706 // Otherwise try allocating obj tenured
707 if (obj == nullptr) {
708 obj = _old_gen->allocate_for_promotion(old, s);
709 if (obj == nullptr) {
710 handle_promotion_failure(old);
711 return old;
712 }
713
714 new_obj_is_tenured = true;
715 }
716
717 // Prefetch beyond obj
718 const intx interval = PrefetchCopyIntervalInBytes;
719 Prefetch::write(obj, interval);
720
721 // Copy obj
722 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), old_size);
723
724 ContinuationGCSupport::transform_stack_chunk(obj);
725
726 if (!new_obj_is_tenured) {
727 // Increment age if obj still in new generation
728 obj->incr_age();
729 age_table()->add(obj, s);
730 }
731
732 obj->initialize_hash_if_necessary(old);
733
  // Done; install the forwarding pointer to obj in the old object's header.
735 old->forward_to(obj);
736
737 if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
738 // Record old; request adds a new weak reference, which reference
739 // processing expects to refer to a from-space object.
740 _string_dedup_requests.add(old);
741 }
742 return obj;
743 }
744
745 void DefNewGeneration::drain_promo_failure_scan_stack() {
746 PromoteFailureClosure cl{this};
747 while (!_promo_failure_scan_stack.is_empty()) {
748 oop obj = _promo_failure_scan_stack.pop();
749 obj->oop_iterate(&cl);
750 }
751 }
752
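// Offer the unused tail of to-space as scratch memory for use during full-gc.
// Nothing is offered after a promotion failure, when to-space may still hold
// evacuated copies.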
753 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
754 if (_promotion_failed) {
755 return;
756 }
757
758 const size_t MinFreeScratchWords = 100;
759
760 ContiguousSpace* to_space = to();
761 const size_t free_words = pointer_delta(to_space->end(), to_space->top());
762 if (free_words >= MinFreeScratchWords) {
763 scratch = to_space->top();
764 num_words = free_words;
765 }
766 }
767
768 void DefNewGeneration::reset_scratch() {
  // If we contributed scratch from to-space, mangle all of to-space when
  // ZapUnusedHeapArea is set. This is needed because top is not maintained
  // while to-space is used as scratch.
772 if (ZapUnusedHeapArea) {
773 to()->mangle_unused_area();
774 }
775 }
776
777 void DefNewGeneration::gc_epilogue() {
778 assert(!GCLocker::is_active(), "We should not be executing here");
779 // update the generation and space performance counters
780 update_counters();
781 }
782
783 void DefNewGeneration::update_counters() {
784 if (UsePerfData) {
785 _eden_counters->update_all(_eden_space->capacity(), _eden_space->used());
786 _from_counters->update_all(_from_space->capacity(), _from_space->used());
787 _to_counters->update_all(_to_space->capacity(), _to_space->used());
788 _gen_counters->update_capacity(_virtual_space.committed_size());
789 }
790 }
791
792 void DefNewGeneration::verify() {
793 eden()->verify();
794 from()->verify();
795 to()->verify();
796 }
797
798 void DefNewGeneration::print_on(outputStream* st) const {
799 st->print("%-10s", name());
800
801 st->print(" total %zuK, used %zuK ", capacity() / K, used() / K);
802 _virtual_space.print_space_boundaries_on(st);
803
804 StreamIndentor si(st, 1);
805 eden()->print_on(st, "eden ");
806 from()->print_on(st, "from ");
807 to()->print_on(st, "to ");
808 }
809
810 HeapWord* DefNewGeneration::expand_and_allocate(size_t word_size) {
811 assert(Heap_lock->is_locked(), "precondition");
812
813 size_t eden_free_bytes = eden()->free();
814 size_t requested_bytes = word_size * HeapWordSize;
815 if (eden_free_bytes < requested_bytes) {
816 size_t expand_bytes = requested_bytes - eden_free_bytes;
817 expand_eden_by(align_up(expand_bytes, SpaceAlignment));
818 }
819
820 HeapWord* result = eden()->allocate(word_size);
821 return result;
822 }
823
824 HeapWord* DefNewGeneration::par_allocate(size_t word_size) {
825 return eden()->par_allocate(word_size);
826 }
827
828 size_t DefNewGeneration::tlab_capacity() const {
829 return eden()->capacity();
830 }
831
832 size_t DefNewGeneration::tlab_used() const {
833 return eden()->used();
834 }
835
836 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
837 return unsafe_max_alloc_nogc();
838 }