/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
  assert(_young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}

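// An object in the young generation is considered alive for reference
// processing if it has already been forwarded (copied) during the current
// scavenge; anything outside the young generation's reserved region is
// treated as alive here.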
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return cast_from_oop<HeapWord*>(p) >= _young_gen->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  _rs = GenCollectedHeap::heap()->rem_set();
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(SerialHeap* heap,
                             DefNewScanClosure* cur,
                             DefNewYoungerGenClosure* older) :
  _heap(heap), _scan_cur_or_nonheap(cur), _scan_older(older)
{
}

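// Evacuate "followers": repeatedly scan the objects copied since the last
// save-marks point (in the young and old generations) until a pass copies
// nothing new, i.e. no allocations have happened since the marks were saved.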
void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap, _scan_older);
  } while (!_heap->no_allocs_since_save_marks());
  guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

void CLDScanClosure::do_cld(ClassLoaderData* cld) {
  NOT_PRODUCT(ResourceMark rm);
  log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
                                  p2i(cld),
                                  cld->loader_name_and_id(),
                                  cld->has_modified_oops() ? "true" : "false");

  // If the cld has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (cld->has_modified_oops()) {

    // Tell the closure which CLD is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_cld(cld);

    // Clean the cld since we're going to scavenge all the metadata.
    cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

    _scavenge_closure->set_scanned_cld(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  _boundary = _g->reserved().end();
}

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

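// Carve the committed part of the virtual space into eden and the two
// survivor spaces. A non-zero minimum_eden_size indicates that part of eden
// is in use and must be covered by the newly computed eden.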
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned(eden_start), "checking alignment");
  assert(Space::is_aligned(from_start), "checking alignment");
  assert(Space::is_aligned(to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

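// Exchange the roles of from-space and to-space. The next-compaction-space
// links and, when UsePerfData is enabled, the space counters are updated to
// match.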
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand-to-reserve
  // could potentially cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

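// Increase the candidate young generation size by NewSizeThreadIncrease bytes
// per non-daemon thread, checking each step for overflow. If any step would
// overflow, the previous value (new_size_before) is returned unchanged.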
size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0) {
    int threads_count;
    size_t thread_increase_size = 0;

    // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
    threads_count = Threads::number_of_non_daemon_threads();
    if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
      thread_increase_size = threads_count * NewSizeThreadIncrease;

      // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
      if (new_size_candidate <= max_uintx - thread_increase_size) {
        new_size_candidate += thread_increase_size;

        // 3. Check an overflow at 'align_up'.
        size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
        if (new_size_candidate <= aligned_max) {
          desired_new_size = align_up(new_size_candidate, alignment);
        }
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If they are not both empty, we bail out (otherwise we would have to
  // relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = initial_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = 0;
  size_t thread_increase_size = 0;

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = NULL;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "  Heap_lock is not owned by self" : "",
                        result == NULL ? "NULL" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

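// Recompute the tenuring threshold from the current object age table and,
// when UsePerfData is enabled, publish the threshold and the desired survivor
// size to the GC policy counters.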
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table(_tenuring_threshold);
}

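// Perform a young-generation collection (scavenge): live objects in eden and
// from-space are copied into to-space or promoted into the old generation.
// If promotion fails, forwarding pointers are removed again, to-space is
// added to the set of spaces to compact, and the incremental-collection-failed
// flag is set on the heap.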
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  SerialHeap* heap = SerialHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer->gc_start());

  _old_gen = heap->old_gen();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, heap->gc_cause());

  heap->trace_heap_before_gc(&gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  DefNewScanClosure       scan_closure(this);
  DefNewYoungerGenClosure younger_gen_closure(this, _old_gen);

  CLDScanClosure cld_scan_closure(&scan_closure);

  set_promo_failure_scan_stack_closure(&scan_closure);
  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &scan_closure,
                                                  &younger_gen_closure);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    StrongRootsScope srs(0);

    heap->young_process_roots(&scan_closure,
                              &younger_gen_closure,
                              &cld_scan_closure);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
  SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
  const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = heap->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    heap->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(&gc_tracer);

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardedPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(NULL);
}

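// Handle a failed promotion of an object to the old generation: preserve its
// mark word if necessary, forward the object to itself, and push it on the
// promotion-failure scan stack so its references still get scanned.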
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());
  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

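// Copy a live object into to-space (if it is young enough and there is room)
// or else promote it into the old generation. Returns the new location of the
// object, or the original object (forwarded to itself) if promotion failed.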
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _old_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

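// Offer the unused tail of to-space to the old generation as scratch space.
// Nothing is contributed if this generation made the request itself, if a
// promotion failure left live objects in to-space, or if the free area is
// smaller than MinFreeScratchWords.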
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) {
    return;
  }
  assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

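// A scavenge is only attempted when to-space is empty and the old generation
// judges that it could absorb, in the worst case, promotion of everything
// currently used in this generation.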
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _old_gen = gch->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty, at a
  // minimum, at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == NULL) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}