/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _incremental_collection_failed(false),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
}

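// Set up the memory pools and GC memory managers that back the
// java.lang.management API. The young ("Copy") manager covers the eden and
// survivor pools; the old ("MarkSweepCompact") manager covers all three pools,
// since a full GC affects the whole heap.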
void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The eden and survivor pools do not
  // support a usage threshold (low memory detection), as the young generation
  // is expected to fill up between collections.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

void SerialHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void SerialHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size, false /* is_tlab */);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

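// Reserve and carve up the heap. The reserved range is split at MaxNewSize:
// the young generation occupies the low end and the old generation the rest,
// with a single card-table remembered set covering the whole range.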
jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize);

  _rem_set = new CardTableRS(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

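// Reserve a single contiguous address range, MaxNewSize + MaxOldSize bytes,
// for the whole heap; the page size actually used is logged below.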
ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

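// Closure handed to ScavengableNMethods: an nmethod is "scavengable" if it has
// oops pointing into the young generation and therefore must be walked during
// young collections.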
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  SerialFullGC::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

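// Slow-path allocation. Each iteration: (1) a lock-free attempt in the young
// generation, (2) a locked attempt that may also try the old generation,
// (3) if the GC locker is active, expand the heap or stall until it clears,
// otherwise (4) schedule a collection via a VM operation and retry with the
// result it produced.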
HeapWord* SerialHeap::mem_allocate_work(size_t size,
                                        bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    DefNewGeneration *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a JNI critical section, stall the
        // requestor until the critical section has cleared and a GC can run.
        // When the critical section clears, a GC is initiated by the last
        // thread exiting it, so we retry the allocation sequence from the
        // beginning of the loop rather than causing more, now probably
        // unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_SerialCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == nullptr, "must be null if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

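// Try the young generation first. The old generation is tried when the young
// generation declines the request (e.g. the object is too large) or when the
// young allocation fails and first_only is false.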
HeapWord* SerialHeap::attempt_allocation(size_t size,
                                         bool is_tlab,
                                         bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* SerialHeap::mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool SerialHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

bool SerialHeap::is_young_gc_safe() const {
  if (!_young_gen->to()->is_empty()) {
    return false;
  }
  return _old_gen->promotion_attempt_is_safe(_young_gen->used());
}

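// Run a minor (young-generation) collection. Returns false if the collection
// was not attempted (to-space not empty or promotion not deemed safe) or did
// not complete successfully, in which case the caller upgrades to a full GC.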
bool SerialHeap::do_young_collection(bool clear_soft_refs) {
  if (!is_young_gc_safe()) {
    return false;
  }
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(_young_gen->gc_tracer());
  GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_young_gen->counters());
  TraceMemoryManagerStats tmms(_young_gen->gc_manager(), gc_cause(), "end of minor GC");
  print_heap_before_gc();
  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  increment_total_collections(false);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }
  gc_prologue(false);
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  save_marks();

  bool result = _young_gen->collect(clear_soft_refs);

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Only update stats for successful young-gc
  if (result) {
    _old_gen->update_promote_stats();
  }

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }

  _young_gen->compute_new_size();

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  gc_epilogue(false);

  print_heap_after_gc();

  return result;
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

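// Called by the VM operation after a regular allocation attempt has failed.
// Escalates in stages: expand only (if the GC locker is active), then a young
// or full GC plus allocation, then heap expansion, then a maximally compacting
// full GC that clears soft references, before finally giving up and letting
// the caller throw OutOfMemoryError.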
HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  GCLocker::check_active_before_gc();
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  }

  // If the young gen can handle this allocation, attempt a young GC first.
  bool should_run_young_gc = _young_gen->should_allocate(size, is_tlab);
  collect_at_safepoint(!should_run_young_gc);

  result = attempt_allocation(size, is_tlab, false /*first_only*/);
  if (result != nullptr) {
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
    const bool clear_all_soft_refs = true;
    do_full_collection_no_gc_locker(clear_all_soft_refs);
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    return result;
  }
  // The previous full-gc can shrink the heap, so re-expand it.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void SerialHeap::process_roots(ScanningOption so,
                               OopClosure* strong_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               NMethodToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  NMethodToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::nmethods_do(code_roots);
  }
}

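// Apply the closure to every object between *from and the space's current top.
// top is re-read after each pass because applying the closure can evacuate or
// promote more objects into the space; on return *from is advanced to the
// final top.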
template <typename OopClosureType>
static void oop_iterate_from(OopClosureType* blk, ContiguousSpace* space, HeapWord** from) {
  assert(*from != nullptr, "precondition");
  HeapWord* t;
  HeapWord* p = *from;

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = space->top();
    while (p < t) {
      Prefetch::write(p, interval);
      p += cast_to_oop(p)->oop_iterate_size(blk);
    }
  } while (t < space->top());

  *from = space->top();
}

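// Alternate between scanning newly evacuated objects in to-space and newly
// promoted objects in old gen until neither scan produces further work;
// scanning starts from the tops recorded by save_marks() and the cursors are
// advanced by oop_iterate_from().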
void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
                                     OldGenScanClosure* old_cl) {
  ContiguousSpace* to_space = young_gen()->to();
  do {
    oop_iterate_from(young_cl, to_space, &_young_gen_saved_top);
    oop_iterate_from(old_cl, old_gen()->space(), &_old_gen_saved_top);
    // Recheck to-space only, because the postcondition of oop_iterate_from is
    // that the space contains no unscanned objects.
  } while (_young_gen_saved_top != to_space->top());
  guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

void SerialHeap::try_collect_at_safepoint(bool full) {
  assert(SafepointSynchronize::is_at_safepoint(), "precondition");
  if (GCLocker::check_active_before_gc()) {
    return;
  }
  collect_at_safepoint(full);
}

void SerialHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = do_young_collection(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to Full-GC if young-gc fails
  }
  do_full_collection_no_gc_locker(clear_soft_refs);
}

// public collection interfaces
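// For explicit full GC requests (e.g. System.gc()) the loop below retries the
// VM operation until a full collection has actually happened since the counts
// were sampled, stalling on the GC locker between attempts if necessary.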
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  while (true) {
    VM_SerialGCCollect op(!should_run_young_gc,
                          gc_count_before,
                          full_gc_count_before,
                          cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
  if (GCLocker::check_active_before_gc()) {
    return;
  }
  do_full_collection_no_gc_locker(clear_all_soft_refs);
}

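// Stop-the-world mark-compact collection of the whole heap, followed by class
// unloading cleanup, generation resizing and metaspace resizing. The caller
// must already have checked the GC locker.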
void SerialHeap::do_full_collection_no_gc_locker(bool clear_all_soft_refs) {
  IsSTWGCActiveMark gc_active_mark;
  SvcGCMarker sgcm(SvcGCMarker::FULL);
  GCIdMark gc_id_mark;
  GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
  GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);
  TraceCollectorStats tcs(_old_gen->counters());
  TraceMemoryManagerStats tmms(_old_gen->gc_manager(), gc_cause(), "end of major GC");
  const PreGenGCValues pre_gc_values = get_pre_gc_values();
  print_heap_before_gc();

  increment_total_collections(true);
  const bool should_verify = total_collections() >= VerifyGCStartAt;
  if (should_verify && VerifyBeforeGC) {
    prepare_for_verify();
    Universe::verify("Before GC");
  }

  gc_prologue(true);
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
  CodeCache::on_gc_marking_cycle_start();
  ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                            false /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);

  STWGCTimer* gc_timer = SerialFullGC::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = SerialFullGC::gc_tracer();
  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

  pre_full_gc_dump(gc_timer);

  SerialFullGC::invoke_at_safepoint(clear_all_soft_refs);

  post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  // Adjust generation sizes.
  _old_gen->compute_new_size();
  _young_gen->compute_new_size();

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge(/*at_safepoint*/true);
  DEBUG_ONLY(MetaspaceUtils::verify();)

  // Need to clear claim bits for the next mark.
  ClassLoaderDataGraph::clear_claimed_marks();

  _old_gen->update_promote_stats();

  // Resize the metaspace capacity after full collections
  MetaspaceGC::compute_new_size();

  print_heap_change(pre_gc_values);

  // Track memory usage and detect low memory after GC finishes
  MemoryService::track_memory_usage();

  // Need to tell the epilogue code we are done with a full GC, regardless of
  // what the initial value of the "complete" flag was.
  gc_epilogue(true);

  print_heap_after_gc();

  if (should_verify && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity(Thread* thr) const {
  // Only young-gen supports tlab allocation.
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used(Thread* thr) const {
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

bool SerialHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return _old_gen->is_maximal_no_gc();
}

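// Record the current tops of to-space and the old generation before a young GC
// evacuates objects; scan_evacuated_objs() later scans everything above these
// marks as newly copied or promoted objects.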
void SerialHeap::save_marks() {
  _young_gen_saved_top = _young_gen->to()->top();
  _old_gen_saved_top = _old_gen->space()->top();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
 // Does nothing
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue(bool full) {
  // Fill TLABs and such
  ensure_parsability(true);   // retire TLABs

  _old_gen->gc_prologue();
};

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue(full);
  _old_gen->gc_epilogue();

  MetaspaceCounters::update_performance_counters();
};
--- EOF ---