/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

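// Scans the fields of objects that stayed in place after a failed promotion
// (see drain_promo_failure_scan_stack()), scavenging any young-gen objects
// they still reference.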
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

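// Scans oop locations outside the heap (e.g. VM and thread roots) and
// scavenges any young-gen objects they reference.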
class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

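// Scans the oops held by a ClassLoaderData. The embedded CLDOopClosure
// re-dirties the CLD when a scavenged field still points into the young
// generation, so the CLD is visited again on the next scavenge.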
class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
    ClassLoaderData* _scanned_cld;

    template <typename T>
    void do_oop_work(T* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        assert(_scanned_cld != nullptr, "inv");
        if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
          _scanned_cld->record_modified_oops();
        }
      });
    }

  public:
    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
      _scanned_cld(nullptr) {}

    void set_scanned_cld(ClassLoaderData* cld) {
      assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
      _scanned_cld = cld;
    }

    void do_oop(oop* p)       { do_oop_work(p); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  CLDOopClosure _oop_closure;
 public:
  CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied, we know that there are
    // no references into the young gen and we can skip it.
    if (cld->has_modified_oops()) {

      // Tell the closure which CLD is being scanned so that it can be dirtied
      // if oops are left pointing into the young gen.
      _oop_closure.set_scanned_cld(cld);

      // Clean the cld since we're going to scavenge all the metadata.
      cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

      _oop_closure.set_scanned_cld(nullptr);
    }
  }
};

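// An object is considered alive if it lies outside the young generation or
// has already been forwarded (copied) during this scavenge.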
class IsAliveClosure: public BoolObjectClosure {
  HeapWord*         _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

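// Updates weak roots outside the heap that still point into the young
// generation so that they refer to the forwarded (copied) objects.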
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
 public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

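// Used during reference processing to keep referents alive: a referenced
// young-gen object is copied to a survivor space (or promoted), and the
// card is dirtied when an old-to-young pointer remains.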
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord*         _young_gen_end;
  CardTableRS* _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

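// Drives the transitive scan of evacuated objects (both survivors in
// to-space and objects promoted to the old generation) until no unscanned
// objects remain.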
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap* _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  if (eden_size > max_eden_size()) {
    // Need to reduce eden_size to satisfy the max constraint. The delta needs
    // to be 2*SpaceAlignment aligned so that both survivors are properly
    // aligned.
    uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
    eden_size     -= eden_delta;
    survivor_size += eden_delta/2;
  }
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

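  // Lay out the committed region as [eden | from | to], with each survivor
  // space receiving survivor_size bytes.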
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand-to-reserve
  // could potentially cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

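// NewSizeThreadIncrease requests extra young-gen space per non-daemon
// thread; the multiplication is skipped (returning zero) if it would
// overflow.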
size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check for overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check for overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check for overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  SerialHeap* gch = SerialHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()  ->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// If "p" is in the space, returns the address of the start of the
// "block" that contains "p".  We say "block" instead of "object" since
// some heaps may not pack objects densely; a chunk may either be an
// object or a non-object.  If "p" is not in the space, return null.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = nullptr;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        SerialHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "  Heap_lock is not owned by self" : "",
                        result == nullptr ? "null" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

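// Recompute the tenuring threshold from the age table: the threshold is
// lowered when the accumulated size of surviving objects would exceed the
// desired survivor-space usage.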
void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // The desired survivor size is TargetSurvivorRatio percent of the
  // survivor-space capacity, expressed in words.
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

bool DefNewGeneration::collect(bool clear_all_soft_refs) {
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure   old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
                        &cld_cl,
                        &cld_cl,
                        &code_cl);

    _old_gen->scan_old_to_young_refs(saved_top_in_old_gen);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    heap->set_incremental_collection_failed();

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        obj->forward_safe_init_mark();
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}

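// Promotion of "old" failed: leave the object in place, mark it as
// self-forwarded, and remember it so that its fields are still scanned.
// A full GC will follow this scavenge (see remove_forwarding_pointers()).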
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->promote(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    new_obj_is_tenured = true;
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(&cl);
  }
}

void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == nullptr) {
    _old_gen = SerialHeap::heap()->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  SerialHeap* gch = SerialHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == nullptr) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}