/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

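// Applied to the oop fields of objects that failed promotion (see
// drain_promo_failure_scan_stack below): each referent still in young-gen
// is scavenged as well, so the evacuation still covers everything reachable.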
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
    ClassLoaderData* _scanned_cld;

    template <typename T>
    void do_oop_work(T* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        assert(_scanned_cld != nullptr, "inv");
        if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
          _scanned_cld->record_modified_oops();
        }
      });
    }

  public:
    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
                                         _scanned_cld(nullptr) {}

    void set_scanned_cld(ClassLoaderData* cld) {
      assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
      _scanned_cld = cld;
    }

    void do_oop(oop* p)       { do_oop_work(p); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  CLDOopClosure _oop_closure;
public:
  CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are no
    // references into the young gen and we can skip it.
    if (cld->has_modified_oops()) {

      // Tell the closure which CLD is being scanned so that it can be dirtied
      // if oops are left pointing into the young gen.
      _oop_closure.set_scanned_cld(cld);

      // Clean the cld since we're going to scavenge all the metadata.
      cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

      _oop_closure.set_scanned_cld(nullptr);
    }
  }
};

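// Liveness predicate handed to reference processing and weak-root scanning:
// objects above the end of the young generation (i.e. in the old gen) are
// treated as live; a young-gen object is live iff it has already been
// forwarded during this scavenge.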
class IsAliveClosure: public BoolObjectClosure {
  HeapWord* _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

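// Fixes up weak roots outside the heap after reference processing: dead
// referents have already been cleared by the is-alive filter, so any
// remaining young-gen referent must be forwarded and the root is updated to
// point at its new copy.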
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

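// Keep-alive closure for reference processing: a still-live young-gen
// referent is copied to a survivor space (or promoted) if it has not been
// forwarded yet and the referring slot is updated; if that slot lives
// outside the young gen while the copy stays young, its card is dirtied so
// the next scavenge can find the old-to-young pointer.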
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord* _young_gen_end;
  CardTableRS* _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

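// Drives the transitive closure of the evacuation: repeatedly scans objects
// newly copied into to-space and newly promoted into the old generation
// until no unscanned survivors remain.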
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap* _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _promo_failure_drain_in_progress(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
                                         min_size, max_size, _virtual_space.committed_size());
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  if (eden_size > max_eden_size()) {
    // Need to reduce eden_size to satisfy the max constraint. The delta needs
    // to be 2*SpaceAlignment aligned so that both survivors are properly
    // aligned.
    uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
    eden_size     -= eden_delta;
    survivor_size += eden_delta/2;
  }
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(to_start, SpaceAlignment), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);
}

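// After a successful scavenge the now-empty from-space and the populated
// to-space exchange roles; their performance counters are swapped along
// with them.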
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

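// Commit an additional 'bytes' of the reserved virtual space. Newly
// committed memory is mangled immediately when ZapUnusedHeapArea is set.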
bool DefNewGeneration::expand(size_t bytes) {
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  return success;
}

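// Extra young-gen bytes allowed for application threads: the product
// threads_count * NewSizeThreadIncrease, or 0 if that product would
// overflow.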
size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check an overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check an overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check an overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  SerialHeap* gch = SerialHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size %zuK->%zuK [eden=%zuK,survivor=%zuK]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed %zuK extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);  // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// If "p" is in the space, returns the address of the start of the
// "block" that contains "p". We say "block" instead of "object" since
// some heaps may not pack objects densely; a chunk may either be an
// object or a non-object. If "p" is not in the space, return null.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

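// Recompute the tenuring threshold from the age table collected during this
// scavenge, and publish the threshold and desired survivor size via the GC
// policy counters when UsePerfData is enabled.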
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // The desired survivor size is TargetSurvivorRatio percent of the current
  // survivor space capacity (half of it by default).
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

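// Perform a young-generation scavenge: evacuate objects reachable from the
// strong roots and from dirty old-to-young cards into to-space (or the old
// gen), process discovered references and weak roots, then swap the survivor
// spaces. Returns false if a promotion failure occurred, in which case a full
// collection is expected to follow.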
bool DefNewGeneration::collect(bool clear_all_soft_refs) {
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
                        &cld_cl,
                        &cld_cl,
                        &code_cl);

    _old_gen->scan_old_to_young_refs(saved_top_in_old_gen);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, nullptr, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        // To restore the klass-bits in the header.
        // Needed for object iteration to work properly.
        obj->set_mark(obj->forwardee()->prototype_mark());
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}

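// Called when an object could neither be copied to to-space nor promoted to
// the old gen: the object is forwarded to itself and pushed on a scan stack
// so its fields are still scavenged; the scavenge as a whole is then marked
// as having failed promotion.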
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %zu", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

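// Copy a live object out of eden/from-space: into to-space while it is below
// the tenuring threshold, otherwise (or if to-space allocation fails) into
// the old generation. Installs the forwarding pointer, updates the age table
// for young copies, and records string-deduplication candidates.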
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->allocate_for_promotion(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    new_obj_is_tenured = true;
  }

  // Prefetch beyond obj
  const intx interval = PrefetchCopyIntervalInBytes;
  Prefetch::write(obj, interval);

  // Copy obj
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (!new_obj_is_tenured) {
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

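// Scan the oop fields of objects that failed promotion so that everything
// they reference is still evacuated (or self-forwarded) during this scavenge.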
void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(&cl);
  }
}

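// Offer the unused tail of to-space as scratch memory to the rest of the
// collector; nothing is contributed after a promotion failure or when fewer
// than MinFreeScratchWords words are free.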
void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area();
  }
}

void DefNewGeneration::gc_epilogue(bool full) {
  assert(!GCLocker::is_active(), "We should not be executing here");
  // update the generation and space performance counters
  update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_capacity(_virtual_space.committed_size());
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  st->print("%-10s", name());

  st->print(" total %zuK, used %zuK ", capacity() / K, used() / K);
  _virtual_space.print_space_boundaries_on(st);

  StreamIndentor si(st, 1);
  eden()->print_on(st, "eden ");
  from()->print_on(st, "from ");
  to()->print_on(st, "to   ");
}

HeapWord* DefNewGeneration::allocate(size_t word_size) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}