1 /*
2 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/serial/cardTableRS.hpp"
27 #include "gc/serial/defNewGeneration.inline.hpp"
28 #include "gc/serial/serialGcRefProcProxyTask.hpp"
29 #include "gc/serial/serialHeap.inline.hpp"
30 #include "gc/serial/serialStringDedup.inline.hpp"
31 #include "gc/serial/tenuredGeneration.hpp"
32 #include "gc/shared/adaptiveSizePolicy.hpp"
33 #include "gc/shared/ageTable.inline.hpp"
34 #include "gc/shared/collectorCounters.hpp"
35 #include "gc/shared/continuationGCSupport.inline.hpp"
36 #include "gc/shared/gcArguments.hpp"
37 #include "gc/shared/gcHeapSummary.hpp"
38 #include "gc/shared/gcLocker.hpp"
39 #include "gc/shared/gcPolicyCounters.hpp"
40 #include "gc/shared/gcTimer.hpp"
41 #include "gc/shared/gcTrace.hpp"
42 #include "gc/shared/gcTraceTime.inline.hpp"
43 #include "gc/shared/generationSpec.hpp"
44 #include "gc/shared/preservedMarks.inline.hpp"
45 #include "gc/shared/referencePolicy.hpp"
46 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
47 #include "gc/shared/space.inline.hpp"
48 #include "gc/shared/spaceDecorator.inline.hpp"
49 #include "gc/shared/strongRootsScope.hpp"
50 #include "gc/shared/weakProcessor.hpp"
51 #include "logging/log.hpp"
52 #include "memory/iterator.inline.hpp"
53 #include "memory/resourceArea.hpp"
54 #include "oops/instanceRefKlass.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "runtime/java.hpp"
57 #include "runtime/javaThread.hpp"
58 #include "runtime/prefetch.inline.hpp"
59 #include "runtime/threads.hpp"
60 #include "utilities/align.hpp"
61 #include "utilities/copy.hpp"
62 #include "utilities/globalDefinitions.hpp"
63 #include "utilities/stack.inline.hpp"
64
65 class ScavengeHelper {
66 DefNewGeneration* _young_gen;
67 HeapWord* _young_gen_end;
68 public:
69 ScavengeHelper(DefNewGeneration* young_gen) :
70 _young_gen(young_gen),
71 _young_gen_end(young_gen->reserved().end()) {}
72
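  // Note (added): the young generation is laid out below the old generation in
  // the reserved heap, so a single address comparison against its end suffices.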
73 bool is_in_young_gen(void* p) const {
74 return p < _young_gen_end;
75 }
76
77 template <typename T, typename Func>
78 void try_scavenge(T* p, Func&& f) {
79 T heap_oop = RawAccess<>::oop_load(p);
80 // Should we copy the obj?
81 if (!CompressedOops::is_null(heap_oop)) {
82 oop obj = CompressedOops::decode_not_null(heap_oop);
83 if (is_in_young_gen(obj)) {
84 assert(!_young_gen->to()->is_in_reserved(obj), "Scanning field twice?");
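        // The object has either already been copied during this scavenge
        // (follow its forwarding pointer) or is copied now: into to-space,
        // or into the old generation if it is old enough or to-space is full.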
85 oop new_obj = obj->is_forwarded() ? obj->forwardee()
86 : _young_gen->copy_to_survivor_space(obj);
87 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
88
        // Invoke the caller-supplied callback with the object's new location.
90 f(new_obj);
91 }
92 }
93 }
94 };
95
96 class InHeapScanClosure : public BasicOopIterateClosure {
97 ScavengeHelper _helper;
98 protected:
99 bool is_in_young_gen(void* p) const {
100 return _helper.is_in_young_gen(p);
101 }
102
103 template <typename T, typename Func>
104 void try_scavenge(T* p, Func&& f) {
105 _helper.try_scavenge(p, f);
106 }
107
108 InHeapScanClosure(DefNewGeneration* young_gen) :
109 BasicOopIterateClosure(young_gen->ref_processor()),
110 _helper(young_gen) {}
111 };
112
113 class OffHeapScanClosure : public OopClosure {
114 ScavengeHelper _helper;
115 protected:
116 bool is_in_young_gen(void* p) const {
117 return _helper.is_in_young_gen(p);
118 }
119
120 template <typename T, typename Func>
121 void try_scavenge(T* p, Func&& f) {
122 _helper.try_scavenge(p, f);
123 }
124
125 OffHeapScanClosure(DefNewGeneration* young_gen) : _helper(young_gen) {}
126 };
127
128 class OldGenScanClosure : public InHeapScanClosure {
129 CardTableRS* _rs;
130
131 template <typename T>
132 void do_oop_work(T* p) {
133 assert(!is_in_young_gen(p), "precondition");
134
135 try_scavenge(p, [&] (oop new_obj) {
      // If the field now points to a young-gen object, dirty its card so the
      // old-to-young pointer is found by the next scavenge.
137 if (is_in_young_gen(new_obj)) {
138 _rs->inline_write_ref_field_gc(p);
139 }
140 });
141 }
142 public:
143 OldGenScanClosure(DefNewGeneration* g) : InHeapScanClosure(g),
144 _rs(SerialHeap::heap()->rem_set()) {}
145
146 void do_oop(oop* p) { do_oop_work(p); }
147 void do_oop(narrowOop* p) { do_oop_work(p); }
148 };
149
150 class PromoteFailureClosure : public InHeapScanClosure {
151 template <typename T>
152 void do_oop_work(T* p) {
153 assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
154 assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");
155
156 try_scavenge(p, [] (auto) {});
157 }
158 public:
159 PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
160
161 void do_oop(oop* p) { do_oop_work(p); }
162 void do_oop(narrowOop* p) { do_oop_work(p); }
163 };
164
165 class YoungGenScanClosure : public InHeapScanClosure {
166 template <typename T>
167 void do_oop_work(T* p) {
168 assert(SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "precondition");
169
170 try_scavenge(p, [] (auto) {});
171 }
172 public:
173 YoungGenScanClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
174
175 void do_oop(oop* p) { do_oop_work(p); }
176 void do_oop(narrowOop* p) { do_oop_work(p); }
177 };
178
179 class RootScanClosure : public OffHeapScanClosure {
180 template <typename T>
181 void do_oop_work(T* p) {
182 assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
183
184 try_scavenge(p, [] (auto) {});
185 }
186 public:
187 RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}
188
189 void do_oop(oop* p) { do_oop_work(p); }
190 void do_oop(narrowOop* p) { do_oop_work(p); }
191 };
192
193 class CLDScanClosure: public CLDClosure {
194
195 class CLDOopClosure : public OffHeapScanClosure {
196 ClassLoaderData* _scanned_cld;
197
198 template <typename T>
199 void do_oop_work(T* p) {
200 assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
201
202 try_scavenge(p, [&] (oop new_obj) {
203 assert(_scanned_cld != nullptr, "inv");
204 if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
205 _scanned_cld->record_modified_oops();
206 }
207 });
208 }
209
210 public:
211 CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
212 _scanned_cld(nullptr) {}
213
214 void set_scanned_cld(ClassLoaderData* cld) {
215 assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
216 _scanned_cld = cld;
217 }
218
219 void do_oop(oop* p) { do_oop_work(p); }
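    // CLD oops are handles in native (off-heap) memory and are never stored
    // in compressed form, so the narrowOop variant should be unreachable.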
220 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
221 };
222
223 CLDOopClosure _oop_closure;
224 public:
225 CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}
226
227 void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
230 if (cld->has_modified_oops()) {
231
232 // Tell the closure which CLD is being scanned so that it can be dirtied
233 // if oops are left pointing into the young gen.
234 _oop_closure.set_scanned_cld(cld);
235
236 // Clean the cld since we're going to scavenge all the metadata.
237 cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
238
239 _oop_closure.set_scanned_cld(nullptr);
240 }
241 }
242 };
243
244 class IsAliveClosure: public BoolObjectClosure {
245 HeapWord* _young_gen_end;
246 public:
247 IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}
248
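  // An object is considered live if it lies outside the young generation
  // (a scavenge never reclaims old-gen objects) or if it has already been
  // forwarded, i.e. copied or promoted during this scavenge.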
249 bool do_object_b(oop p) {
250 return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
251 }
252 };
253
254 class AdjustWeakRootClosure: public OffHeapScanClosure {
255 template <class T>
256 void do_oop_work(T* p) {
257 DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
258 assert(!heap->is_in_reserved(p), "outside the heap");
259
260 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
261 if (is_in_young_gen(obj)) {
262 assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
263 assert(obj->is_forwarded(), "forwarded before weak-root-processing");
264 oop new_obj = obj->forwardee();
265 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
266 }
267 }
268 public:
269 AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}
270
271 void do_oop(oop* p) { do_oop_work(p); }
272 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
273 };
274
275 class KeepAliveClosure: public OopClosure {
276 DefNewGeneration* _young_gen;
277 HeapWord* _young_gen_end;
278 CardTableRS* _rs;
279
280 bool is_in_young_gen(void* p) const {
281 return p < _young_gen_end;
282 }
283
284 template <class T>
285 void do_oop_work(T* p) {
286 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
287
288 if (is_in_young_gen(obj)) {
289 oop new_obj = obj->is_forwarded() ? obj->forwardee()
290 : _young_gen->copy_to_survivor_space(obj);
291 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
292
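      // If the object stays in the young generation but the field holding it
      // lies outside (in the old generation), dirty the card so the
      // old-to-young pointer is found by the next scavenge.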
293 if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
294 _rs->inline_write_ref_field_gc(p);
295 }
296 }
297 }
298 public:
299 KeepAliveClosure(DefNewGeneration* g) :
300 _young_gen(g),
301 _young_gen_end(g->reserved().end()),
302 _rs(SerialHeap::heap()->rem_set()) {}
303
304 void do_oop(oop* p) { do_oop_work(p); }
305 void do_oop(narrowOop* p) { do_oop_work(p); }
306 };
307
308 class FastEvacuateFollowersClosure: public VoidClosure {
309 SerialHeap* _heap;
310 YoungGenScanClosure* _young_cl;
311 OldGenScanClosure* _old_cl;
312 public:
313 FastEvacuateFollowersClosure(SerialHeap* heap,
314 YoungGenScanClosure* young_cl,
315 OldGenScanClosure* old_cl) :
316 _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
317 {}
318
319 void do_void() {
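    // Newly copied objects may themselves reference not-yet-copied objects,
    // so keep rescanning everything allocated since the last save-marks
    // point until no further allocations are observed.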
320 do {
321 _heap->oop_since_save_marks_iterate(_young_cl, _old_cl);
322 } while (!_heap->no_allocs_since_save_marks());
323 guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
324 }
325 };
326
327 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
328 size_t initial_size,
329 size_t min_size,
330 size_t max_size,
331 const char* policy)
332 : Generation(rs, initial_size),
333 _preserved_marks_set(false /* in_c_heap */),
334 _promo_failure_drain_in_progress(false),
335 _should_allocate_from_space(false),
336 _string_dedup_requests()
337 {
338 MemRegion cmr((HeapWord*)_virtual_space.low(),
339 (HeapWord*)_virtual_space.high());
340 GenCollectedHeap* gch = GenCollectedHeap::heap();
341
342 gch->rem_set()->resize_covered_region(cmr);
343
344 _eden_space = new ContiguousSpace();
345 _from_space = new ContiguousSpace();
346 _to_space = new ContiguousSpace();
347
348 // Compute the maximum eden and survivor space sizes. These sizes
349 // are computed assuming the entire reserved space is committed.
350 // These values are exported as performance counters.
351 uintx size = _virtual_space.reserved_size();
352 _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
353 _max_eden_size = size - (2*_max_survivor_size);
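  // For example, with the default SurvivorRatio of 8, each survivor space is
  // roughly 1/10 of the reserved size, leaving about 8/10 for eden (the
  // familiar 8:1:1 eden/from/to layout).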
354
355 // allocate the performance counters
356
357 // Generation counters -- generation 0, 3 subspaces
358 _gen_counters = new GenerationCounters("new", 0, 3,
359 min_size, max_size, &_virtual_space);
360 _gc_counters = new CollectorCounters(policy, 0);
361
362 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
363 _gen_counters);
364 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
365 _gen_counters);
366 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
367 _gen_counters);
368
369 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
370 update_counters();
371 _old_gen = nullptr;
372 _tenuring_threshold = MaxTenuringThreshold;
373 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
374
375 _ref_processor = nullptr;
376
377 _gc_timer = new STWGCTimer();
378
379 _gc_tracer = new DefNewTracer();
380 }
381
382 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
383 bool clear_space,
384 bool mangle_space) {
385 // If the spaces are being cleared (only done at heap initialization
386 // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
389 assert(clear_space || (to()->is_empty() && from()->is_empty()),
390 "Initialization of the survivor spaces assumes these are empty");
391
392 // Compute sizes
393 uintx size = _virtual_space.committed_size();
394 uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
395 uintx eden_size = size - (2*survivor_size);
396 if (eden_size > max_eden_size()) {
397 // Need to reduce eden_size to satisfy the max constraint. The delta needs
398 // to be 2*SpaceAlignment aligned so that both survivors are properly
399 // aligned.
400 uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
401 eden_size -= eden_delta;
402 survivor_size += eden_delta/2;
403 }
404 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
405
406 if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
408 minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
409 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
410 uintx unaligned_survivor_size =
411 align_down(maximum_survivor_size, SpaceAlignment);
412 survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
413 eden_size = size - (2*survivor_size);
414 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
415 assert(eden_size >= minimum_eden_size, "just checking");
416 }
417
418 char *eden_start = _virtual_space.low();
419 char *from_start = eden_start + eden_size;
420 char *to_start = from_start + survivor_size;
421 char *to_end = to_start + survivor_size;
422
423 assert(to_end == _virtual_space.high(), "just checking");
424 assert(Space::is_aligned(eden_start), "checking alignment");
425 assert(Space::is_aligned(from_start), "checking alignment");
426 assert(Space::is_aligned(to_start), "checking alignment");
427
428 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
429 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
430 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
431
432 // A minimum eden size implies that there is a part of eden that
433 // is being used and that affects the initialization of any
434 // newly formed eden.
435 bool live_in_eden = minimum_eden_size > 0;
436
437 // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
439 if (!clear_space) {
440 // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
443 // is not properly mangled.
444 if (ZapUnusedHeapArea) {
445 HeapWord* limit = (HeapWord*) _virtual_space.high();
446 eden()->check_mangled_unused_area(limit);
447 from()->check_mangled_unused_area(limit);
448 to()->check_mangled_unused_area(limit);
449 }
450 }
451
452 // Reset the spaces for their new regions.
453 eden()->initialize(edenMR,
454 clear_space && !live_in_eden,
455 SpaceDecorator::Mangle);
456 // If clear_space and live_in_eden, we will not have cleared any
457 // portion of eden above its top. This can cause newly
458 // expanded space not to be mangled if using ZapUnusedHeapArea.
459 // We explicitly do such mangling here.
460 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
461 eden()->mangle_unused_area();
462 }
463 from()->initialize(fromMR, clear_space, mangle_space);
464 to()->initialize(toMR, clear_space, mangle_space);
465
466 // Set next compaction spaces.
467 eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling, when to-space can contain live objects.
471 from()->set_next_compaction_space(nullptr);
472 }
473
474 void DefNewGeneration::swap_spaces() {
475 ContiguousSpace* s = from();
476 _from_space = to();
477 _to_space = s;
478 eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction, so it need
  // not be considered. The exception is during promotion
  // failure handling, when to-space can contain live objects.
482 from()->set_next_compaction_space(nullptr);
483
484 if (UsePerfData) {
485 CSpaceCounters* c = _from_counters;
486 _from_counters = _to_counters;
487 _to_counters = c;
488 }
489 }
490
491 bool DefNewGeneration::expand(size_t bytes) {
492 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
493 bool success = _virtual_space.expand_by(bytes);
494 if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
498 HeapWord* new_high = (HeapWord*) _virtual_space.high();
499 MemRegion mangle_region(prev_high, new_high);
500 SpaceMangler::mangle_region(mangle_region);
501 }
502
  // Do not attempt an expand-to-the-reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call could
  // potentially cause an undue expansion, for example if the
  // first expand fails for unknown reasons but the second
  // succeeds and expands the heap to its maximum value.
511 if (GCLocker::is_active()) {
512 log_debug(gc)("Garbage collection disabled, expanded heap instead");
513 }
514
515 return success;
516 }
517
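// Each non-daemon Java thread is assumed to want some extra young-gen
// capacity. For example, with the default NewSizeThreadIncrease of 16K,
// 100 non-daemon threads add roughly 1600K to the desired new size
// (subject to the overflow check below).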
518 size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
519 size_t thread_increase_size = 0;
  // Check for overflow in 'threads_count * NewSizeThreadIncrease'.
521 if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
522 thread_increase_size = threads_count * NewSizeThreadIncrease;
523 }
524 return thread_increase_size;
525 }
526
527 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
528 size_t new_size_before,
529 size_t alignment,
530 size_t thread_increase_size) const {
531 size_t desired_new_size = new_size_before;
532
533 if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {
534
    // 1. Check for overflow in 'new_size_candidate + thread_increase_size'.
536 if (new_size_candidate <= max_uintx - thread_increase_size) {
537 new_size_candidate += thread_increase_size;
538
      // 2. Check for overflow in 'align_up'.
540 size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
541 if (new_size_candidate <= aligned_max) {
542 desired_new_size = align_up(new_size_candidate, alignment);
543 }
544 }
545 }
546
547 return desired_new_size;
548 }
549
550 void DefNewGeneration::compute_new_size() {
551 // This is called after a GC that includes the old generation, so from-space
552 // will normally be empty.
  // Note that we check both survivor spaces, since if the scavenge failed they
  // swap roles. If either is non-empty, we bail out (otherwise we would have
  // to relocate the objects).
555 if (!from()->is_empty() || !to()->is_empty()) {
556 return;
557 }
558
559 GenCollectedHeap* gch = GenCollectedHeap::heap();
560
561 size_t old_size = gch->old_gen()->capacity();
562 size_t new_size_before = _virtual_space.committed_size();
563 size_t min_new_size = initial_size();
564 size_t max_new_size = reserved().byte_size();
565 assert(min_new_size <= new_size_before &&
566 new_size_before <= max_new_size,
567 "just checking");
568 // All space sizes must be multiples of Generation::GenGrain.
569 size_t alignment = Generation::GenGrain;
570
571 int threads_count = Threads::number_of_non_daemon_threads();
572 size_t thread_increase_size = calculate_thread_increase_size(threads_count);
573
574 size_t new_size_candidate = old_size / NewRatio;
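  // For example, with a 512M old generation and the default NewRatio of 2,
  // the candidate young-gen size is 256M; it is then adjusted for threads
  // and clamped below.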
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
577 size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
578 alignment, thread_increase_size);
579
580 // Adjust new generation size
581 desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
582 assert(desired_new_size <= max_new_size, "just checking");
583
584 bool changed = false;
585 if (desired_new_size > new_size_before) {
586 size_t change = desired_new_size - new_size_before;
587 assert(change % alignment == 0, "just checking");
588 if (expand(change)) {
589 changed = true;
590 }
    // If the expansion failed (and at this point it was expected to
    // succeed), ignore the failure and leave "changed" as false.
595 }
596 if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
598 size_t change = new_size_before - desired_new_size;
599 assert(change % alignment == 0, "just checking");
600 _virtual_space.shrink_by(change);
601 changed = true;
602 }
603 if (changed) {
604 // The spaces have already been mangled at this point but
605 // may not have been cleared (set top = bottom) and should be.
606 // Mangling was done when the heap was being expanded.
607 compute_space_boundaries(eden()->used(),
608 SpaceDecorator::Clear,
609 SpaceDecorator::DontMangle);
610 MemRegion cmr((HeapWord*)_virtual_space.low(),
611 (HeapWord*)_virtual_space.high());
612 gch->rem_set()->resize_covered_region(cmr);
613
614 log_debug(gc, ergo, heap)(
615 "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
616 new_size_before/K, _virtual_space.committed_size()/K,
617 eden()->capacity()/K, from()->capacity()/K);
618 log_trace(gc, ergo, heap)(
619 " [allowed " SIZE_FORMAT "K extra for %d threads]",
620 thread_increase_size/K, threads_count);
621 }
622 }
623
624 void DefNewGeneration::ref_processor_init() {
625 assert(_ref_processor == nullptr, "a reference processor already exists");
626 assert(!_reserved.is_empty(), "empty generation?");
627 _span_based_discoverer.set_span(_reserved);
628 _ref_processor = new ReferenceProcessor(&_span_based_discoverer); // a vanilla reference processor
629 }
630
631 size_t DefNewGeneration::capacity() const {
632 return eden()->capacity()
633 + from()->capacity(); // to() is only used during scavenge
634 }
635
636
637 size_t DefNewGeneration::used() const {
638 return eden()->used()
639 + from()->used(); // to() is only used during scavenge
640 }
641
642
643 size_t DefNewGeneration::free() const {
644 return eden()->free()
645 + from()->free(); // to() is only used during scavenge
646 }
647
648 size_t DefNewGeneration::max_capacity() const {
649 const size_t reserved_bytes = reserved().byte_size();
650 return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
651 }
652
653 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
654 return eden()->free();
655 }
656
657 size_t DefNewGeneration::capacity_before_gc() const {
658 return eden()->capacity();
659 }
660
661 size_t DefNewGeneration::contiguous_available() const {
662 return eden()->free();
663 }
664
665
666 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
667 eden()->object_iterate(blk);
668 from()->object_iterate(blk);
669 }
670
671
672 void DefNewGeneration::space_iterate(SpaceClosure* blk,
673 bool usedOnly) {
674 blk->do_space(eden());
675 blk->do_space(from());
676 blk->do_space(to());
677 }
678
// The last collection bailed out and we are running out of heap space,
// so we try to allocate in from-space, too.
681 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
682 bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
683
684 // If the Heap_lock is not locked by this thread, this will be called
685 // again later with the Heap_lock held.
686 bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
687
688 HeapWord* result = nullptr;
689 if (do_alloc) {
690 result = from()->allocate(size);
691 }
692
693 log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
694 size,
695 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
696 "true" : "false",
697 Heap_lock->is_locked() ? "locked" : "unlocked",
698 from()->free(),
699 should_try_alloc ? "" : " should_allocate_from_space: NOT",
700 do_alloc ? " Heap_lock is not owned by self" : "",
701 result == nullptr ? "null" : "object");
702
703 return result;
704 }
705
706 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
707 // We don't attempt to expand the young generation (but perhaps we should.)
708 return allocate(size, is_tlab);
709 }
710
711 void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real survivor space.
713 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
714 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
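  // With the default TargetSurvivorRatio of 50 this is half of to-space, so
  // the tenuring threshold is lowered whenever surviving objects would occupy
  // more than half of a survivor space.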
715
716 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
717
718 if (UsePerfData) {
719 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
720 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
721 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
722 }
723
724 age_table()->print_age_table(_tenuring_threshold);
725 }
726
727 void DefNewGeneration::collect(bool full,
728 bool clear_all_soft_refs,
729 size_t size,
730 bool is_tlab) {
731 assert(full || size > 0, "otherwise we don't want to collect");
732
733 SerialHeap* heap = SerialHeap::heap();
734
735 // If the next generation is too full to accommodate promotion
736 // from this generation, pass on collection; let the next generation
737 // do it.
738 if (!collection_attempt_is_safe()) {
739 log_trace(gc)(":: Collection attempt not safe ::");
740 heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
741 return;
742 }
743 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
744 _gc_timer->register_gc_start();
745 _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
746 _ref_processor->start_discovery(clear_all_soft_refs);
747
748 _old_gen = heap->old_gen();
749
750 init_assuming_no_promotion_failure();
751
752 GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());
753
754 heap->trace_heap_before_gc(_gc_tracer);
755
756 // These can be shared for all code paths
757 IsAliveClosure is_alive(this);
758
759 age_table()->clear();
760 to()->clear(SpaceDecorator::Mangle);
761 // The preserved marks should be empty at the start of the GC.
762 _preserved_marks_set.init(1);
763
764 assert(heap->no_allocs_since_save_marks(),
765 "save marks have not been newly set.");
766
767 YoungGenScanClosure young_gen_cl(this);
768 OldGenScanClosure old_gen_cl(this);
769
770 FastEvacuateFollowersClosure evacuate_followers(heap,
771 &young_gen_cl,
772 &old_gen_cl);
773
774 assert(heap->no_allocs_since_save_marks(),
775 "save marks have not been newly set.");
776
777 {
778 StrongRootsScope srs(0);
779 RootScanClosure root_cl{this};
780 CLDScanClosure cld_scan_closure{this};
781
782 heap->young_process_roots(&root_cl,
783 &old_gen_cl,
784 &cld_scan_closure);
785 }
786
787 // "evacuate followers".
788 evacuate_followers.do_void();
789
790 {
791 // Reference processing
792 KeepAliveClosure keep_alive(this);
793 ReferenceProcessor* rp = ref_processor();
794 ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
795 SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
796 const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
797 _gc_tracer->report_gc_reference_stats(stats);
798 _gc_tracer->report_tenuring_threshold(tenuring_threshold());
799 pt.print_all_references();
800 }
801 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
802
803 {
804 AdjustWeakRootClosure cl{this};
805 WeakProcessor::weak_oops_do(&is_alive, &cl);
806 }
807
808 // Verify that the usage of keep_alive didn't copy any objects.
809 assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
810
811 _string_dedup_requests.flush();
812
813 if (!_promotion_failed) {
814 // Swap the survivor spaces.
815 eden()->clear(SpaceDecorator::Mangle);
816 from()->clear(SpaceDecorator::Mangle);
817 if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
825 to()->mangle_unused_area();
826 }
827 swap_spaces();
828
829 assert(to()->is_empty(), "to space should be empty now");
830
831 adjust_desired_tenuring_threshold();
832
    // A successful scavenge should restart the GC time limit count, which is
    // for full GCs.
835 AdaptiveSizePolicy* size_policy = heap->size_policy();
836 size_policy->reset_gc_overhead_limit_count();
837 assert(!heap->incremental_collection_failed(), "Should be clear");
838 } else {
839 assert(_promo_failure_scan_stack.is_empty(), "post condition");
840 _promo_failure_scan_stack.clear(true); // Clear cached segments.
841
842 remove_forwarding_pointers();
843 log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
845 // when a promotion failure has occurred. In that
846 // case there can be live objects in to-space
847 // as a result of a partial evacuation of eden
848 // and from-space.
849 swap_spaces(); // For uniformity wrt ParNewGeneration.
850 from()->set_next_compaction_space(to());
851 heap->set_incremental_collection_failed();
852
853 // Inform the next generation that a promotion failure occurred.
854 _old_gen->promotion_failure_occurred();
855 _gc_tracer->report_promotion_failed(_promotion_failed_info);
856
857 // Reset the PromotionFailureALot counters.
858 NOT_PRODUCT(heap->reset_promotion_should_fail();)
859 }
860 // We should have processed and cleared all the preserved marks.
861 _preserved_marks_set.reclaim();
862
863 heap->trace_heap_after_gc(_gc_tracer);
864
865 _gc_timer->register_gc_end();
866
867 _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
868 }
869
870 void DefNewGeneration::init_assuming_no_promotion_failure() {
871 _promotion_failed = false;
872 _promotion_failed_info.reset();
873 from()->set_next_compaction_space(nullptr);
874 }
875
876 void DefNewGeneration::remove_forwarding_pointers() {
877 assert(_promotion_failed, "precondition");
878
879 // Will enter Full GC soon due to failed promotion. Must reset the mark word
880 // of objs in young-gen so that no objs are marked (forwarded) when Full GC
881 // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
882 struct ResetForwardedMarkWord : ObjectClosure {
883 void do_object(oop obj) override {
884 if (obj->is_forwarded()) {
885 obj->forward_safe_init_mark();
886 }
887 }
888 } cl;
889 eden()->object_iterate(&cl);
890 from()->object_iterate(&cl);
891
892 restore_preserved_marks();
893 }
894
895 void DefNewGeneration::restore_preserved_marks() {
896 _preserved_marks_set.restore(nullptr);
897 }
898
899 void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());
901
902 _promotion_failed = true;
903 _promotion_failed_info.register_copy_failure(old->size());
904 _preserved_marks_set.get()->push_if_necessary(old, old->mark());
905
906 ContinuationGCSupport::transform_stack_chunk(old);
907
908 old->forward_to_self();
909
910 _promo_failure_scan_stack.push(old);
911
912 if (!_promo_failure_drain_in_progress) {
913 // prevent recursion in copy_to_survivor_space()
914 _promo_failure_drain_in_progress = true;
915 drain_promo_failure_scan_stack();
916 _promo_failure_drain_in_progress = false;
917 }
918 }
919
920 oop DefNewGeneration::copy_to_survivor_space(oop old) {
921 assert(is_in_reserved(old) && !old->is_forwarded(),
922 "shouldn't be scavenging this oop");
923 size_t s = old->size();
924 oop obj = nullptr;
925
926 // Try allocating obj in to-space (unless too old)
927 if (old->age() < tenuring_threshold()) {
928 obj = cast_to_oop(to()->allocate(s));
929 }
930
931 bool new_obj_is_tenured = false;
932 // Otherwise try allocating obj tenured
933 if (obj == nullptr) {
934 obj = _old_gen->promote(old, s);
935 if (obj == nullptr) {
936 handle_promotion_failure(old);
937 return old;
938 }
939 new_obj_is_tenured = true;
940 } else {
941 // Prefetch beyond obj
942 const intx interval = PrefetchCopyIntervalInBytes;
943 Prefetch::write(obj, interval);
944
945 // Copy obj
946 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
947
948 ContinuationGCSupport::transform_stack_chunk(obj);
949
950 // Increment age if obj still in new generation
951 obj->incr_age();
952 age_table()->add(obj, s);
953 }
954
  // Done; install the forwarding pointer to obj in the old object's header.
956 old->forward_to(obj);
957
958 if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
959 // Record old; request adds a new weak reference, which reference
960 // processing expects to refer to a from-space object.
961 _string_dedup_requests.add(old);
962 }
963 return obj;
964 }
965
966 void DefNewGeneration::drain_promo_failure_scan_stack() {
967 PromoteFailureClosure cl{this};
968 while (!_promo_failure_scan_stack.is_empty()) {
969 oop obj = _promo_failure_scan_stack.pop();
970 obj->oop_iterate(&cl);
971 }
972 }
973
974 void DefNewGeneration::save_marks() {
975 eden()->set_saved_mark();
976 to()->set_saved_mark();
977 from()->set_saved_mark();
978 }
979
980
981 bool DefNewGeneration::no_allocs_since_save_marks() {
982 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
983 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
984 return to()->saved_mark_at_top();
985 }
986
987 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
988 size_t max_alloc_words) {
989 if (requestor == this || _promotion_failed) {
990 return;
991 }
992 assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
993
994 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
995 if (to_space->top() > to_space->bottom()) {
996 trace("to_space not empty when contribute_scratch called");
997 }
998 */
999
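  // Offer the unused tail of to-space to the old generation as scratch space,
  // provided the free area is at least MinFreeScratchWords.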
1000 ContiguousSpace* to_space = to();
1001 assert(to_space->end() >= to_space->top(), "pointers out of order");
1002 size_t free_words = pointer_delta(to_space->end(), to_space->top());
1003 if (free_words >= MinFreeScratchWords) {
1004 ScratchBlock* sb = (ScratchBlock*)to_space->top();
1005 sb->num_words = free_words;
1006 sb->next = list;
1007 list = sb;
1008 }
1009 }
1010
1011 void DefNewGeneration::reset_scratch() {
1012 // If contributing scratch in to_space, mangle all of
1013 // to_space if ZapUnusedHeapArea. This is needed because
1014 // top is not maintained while using to-space as scratch.
1015 if (ZapUnusedHeapArea) {
1016 to()->mangle_unused_area_complete();
1017 }
1018 }
1019
1020 bool DefNewGeneration::collection_attempt_is_safe() {
1021 if (!to()->is_empty()) {
1022 log_trace(gc)(":: to is not empty ::");
1023 return false;
1024 }
1025 if (_old_gen == nullptr) {
1026 GenCollectedHeap* gch = GenCollectedHeap::heap();
1027 _old_gen = gch->old_gen();
1028 }
1029 return _old_gen->promotion_attempt_is_safe(used());
1030 }
1031
1032 void DefNewGeneration::gc_epilogue(bool full) {
1033 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
1034
1035 assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally, at a minimum, the young generation is empty
  // at the end of a collection. If it is not, then the heap is
  // approaching full.
1040 GenCollectedHeap* gch = GenCollectedHeap::heap();
1041 if (full) {
1042 DEBUG_ONLY(seen_incremental_collection_failed = false;)
1043 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
1044 log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
1045 GCCause::to_string(gch->gc_cause()));
1046 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
1047 set_should_allocate_from_space(); // we seem to be running out of space
1048 } else {
1049 log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
1050 GCCause::to_string(gch->gc_cause()));
1051 gch->clear_incremental_collection_failed(); // We just did a full collection
1052 clear_should_allocate_from_space(); // if set
1053 }
1054 } else {
1055 #ifdef ASSERT
1056 // It is possible that incremental_collection_failed() == true
1057 // here, because an attempted scavenge did not succeed. The policy
1058 // is normally expected to cause a full collection which should
1059 // clear that condition, so we should not be here twice in a row
1060 // with incremental_collection_failed() == true without having done
1061 // a full collection in between.
1062 if (!seen_incremental_collection_failed &&
1063 gch->incremental_collection_failed()) {
1064 log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
1065 GCCause::to_string(gch->gc_cause()));
1066 seen_incremental_collection_failed = true;
1067 } else if (seen_incremental_collection_failed) {
1068 log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
1069 GCCause::to_string(gch->gc_cause()));
1070 seen_incremental_collection_failed = false;
1071 }
1072 #endif // ASSERT
1073 }
1074
1075 if (ZapUnusedHeapArea) {
1076 eden()->check_mangled_unused_area_complete();
1077 from()->check_mangled_unused_area_complete();
1078 to()->check_mangled_unused_area_complete();
1079 }
1080
1081 // update the generation and space performance counters
1082 update_counters();
1083 gch->counters()->update_counters();
1084 }
1085
1086 void DefNewGeneration::record_spaces_top() {
1087 assert(ZapUnusedHeapArea, "Not mangling unused space");
1088 eden()->set_top_for_allocations();
1089 to()->set_top_for_allocations();
1090 from()->set_top_for_allocations();
1091 }
1092
1093 void DefNewGeneration::update_counters() {
1094 if (UsePerfData) {
1095 _eden_counters->update_all();
1096 _from_counters->update_all();
1097 _to_counters->update_all();
1098 _gen_counters->update_all();
1099 }
1100 }
1101
1102 void DefNewGeneration::verify() {
1103 eden()->verify();
1104 from()->verify();
1105 to()->verify();
1106 }
1107
1108 void DefNewGeneration::print_on(outputStream* st) const {
1109 Generation::print_on(st);
1110 st->print(" eden");
1111 eden()->print_on(st);
1112 st->print(" from");
1113 from()->print_on(st);
1114 st->print(" to ");
1115 to()->print_on(st);
1116 }
1117
1118
1119 const char* DefNewGeneration::name() const {
1120 return "def new generation";
1121 }
1122
1123 // Moved from inline file as they are not called inline
1124 ContiguousSpace* DefNewGeneration::first_compaction_space() const {
1125 return eden();
1126 }
1127
1128 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
1129 // This is the slow-path allocation for the DefNewGeneration.
1130 // Most allocations are fast-path in compiled code.
1131 // We try to allocate from the eden. If that works, we are happy.
1132 // Note that since DefNewGeneration supports lock-free allocation, we
1133 // have to use it here, as well.
1134 HeapWord* result = eden()->par_allocate(word_size);
1135 if (result == nullptr) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate in from-space, too.
1138 // allocate_from_space can't be inlined because that would introduce a
1139 // circular dependency at compile time.
1140 result = allocate_from_space(word_size);
1141 }
1142 return result;
1143 }
1144
1145 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1146 bool is_tlab) {
1147 return eden()->par_allocate(word_size);
1148 }
1149
1150 size_t DefNewGeneration::tlab_capacity() const {
1151 return eden()->capacity();
1152 }
1153
1154 size_t DefNewGeneration::tlab_used() const {
1155 return eden()->used();
1156 }
1157
1158 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1159 return unsafe_max_alloc_nogc();
1160 }
--- EOF ---