/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

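// An oop is considered alive if it lies above the young generation's reserved
// space (and so is not subject to this collection) or if it has already been
// forwarded (copied) during this scavenge.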
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return cast_from_oop<HeapWord*>(p) >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(SerialHeap* heap,
                             DefNewScanClosure* cur,
                             DefNewYoungerGenClosure* older) :
  _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older)
{
}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older);
  } while (!_heap->no_allocs_since_save_marks());
  guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

void CLDScanClosure::do_cld(ClassLoaderData* cld) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name_and_id(),
                                  cld->has_modified_oops() ? "true" : "false");

  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (cld->has_modified_oops()) {

    // Tell the closure which CLD is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_cld(cld);

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

    _scavenge_closure->set_scanned_cld(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
                                         min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  if (eden_size > max_eden_size()) {
    eden_size = max_eden_size();
    survivor_size = (size - eden_size)/2;
  }
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so it need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

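// Exchange the roles of from-space and to-space. Called after a scavenge,
// when the old from-space has been emptied, and also after a promotion
// failure, when to-space may still contain partially evacuated objects.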
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so it need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

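// Grow the committed size of the generation by 'bytes'. Newly committed
// memory is mangled immediately when ZapUnusedHeapArea is set.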
bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand to the reserve size. The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary. Also, a second call to expand to the reserve
  // value can potentially cause an undue expansion:
  // for example, if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

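// Increase the proposed young generation size by NewSizeThreadIncrease bytes
// per non-daemon thread, checking each intermediate step for overflow.
// If any step would overflow, the previous value (new_size_before) is kept.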
size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is not empty, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

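// The maximum capacity counts eden plus one survivor space; the other
// survivor space never holds objects between collections.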
size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "): will_fail: %s heap_lock: %s free: " SIZE_FORMAT "%s%s returns %s",
                       size,
                       GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                         "true" : "false",
                       Heap_lock->is_locked() ? "locked" : "unlocked",
                       from()->free(),
                       should_try_alloc ? "" : " should_allocate_from_space: NOT",
                       do_alloc ? " Heap_lock is not owned by self" : "",
                       result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

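// Recompute the tenuring threshold from the age table so that the objects
// expected to remain in the survivor space fit within TargetSurvivorRatio
// percent of its capacity.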
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  SerialHeap* heap = SerialHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start());

  _old_gen = heap->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());

  heap->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  DefNewScanClosure       scan_closure(this);
  DefNewYoungerGenClosure younger_gen_closure(this, _old_gen);

  CLDScanClosure cld_scan_closure(&scan_closure);

  set_promo_failure_scan_stack_closure(&scan_closure);
  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &scan_closure,
                                                  &younger_gen_closure);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    StrongRootsScope srs(0);

    heap->young_process_roots(&scan_closure,
                              &younger_gen_closure,
                              &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
  SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
  const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = heap->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    heap->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(NULL);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

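// Copy a live object encountered during the scavenge: into to-space if it is
// younger than the tenuring threshold and space is available, otherwise into
// the old generation. If promotion also fails, the object is marked as
// self-forwarded and the failure is recorded.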
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


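// During a scavenge only to-space receives new copies, so eden and from-space
// must still be at their saved marks; report whether to-space is as well.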
bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

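// Offer the unused tail of to-space to the old generation as scratch space
// for a full collection, unless this generation is the requestor or the last
// scavenge suffered a promotion failure (to-space may then hold live objects).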
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

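// A scavenge is only considered safe when to-space is empty and the old
// generation reports that it could absorb a promotion of the entire used
// portion of the young generation.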
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                    GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                    GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == NULL) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
--- EOF ---