/*
 * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_G1GC

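// VM_GC_Sync_Operation serializes GC VM operations against the heap: the
// prologue runs in the requesting thread and takes the Heap_lock before the
// operation is executed, and the epilogue releases it after the operation
// has completed.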
bool VM_GC_Sync_Operation::doit_prologue() {
  Heap_lock->lock();
  return true;
}

void VM_GC_Sync_Operation::doit_epilogue() {
  Heap_lock->unlock();
}

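// Verify the heap at a safepoint; prepare_for_verify lets the specific
// collector get the heap into a verifiable state before Universe::verify()
// walks it.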
void VM_Verify::doit() {
  Universe::heap()->prepare_for_verify();
  Universe::verify();
}

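// Reset the soft reference clearing policy when the GC operation completes,
// so that a request to clear all soft refs applies only to this collection.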
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->soft_ref_policy()->set_all_soft_refs_clear(false);
}

const char* VM_GC_Operation::cause() const {
  return GCCause::to_string(_gc_cause);
}

// The same dtrace probe can't be inserted in two different files, so it has
// to be called from here so that it lives in only one file. New probes can't
// be created for the other file anymore; the dtrace probes have to remain
// stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  VM_GC_Sync_Operation::doit_prologue();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  // GC thread root traversal likely used OopMapCache a lot, which
  // might have created lots of old entries. Trigger the cleanup now.
  OopMapCache::try_trigger_cleanup();
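  // The Reference handler thread waits on the Heap_lock for the reference
  // pending list to become non-empty; notify it while we still hold the lock.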
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  VM_GC_Sync_Operation::doit_epilogue();
}

bool VM_GC_HeapInspection::doit_prologue() {
  if (_full_gc && (UseZGC || UseShenandoahGC)) {
    // ZGC and Shenandoah cannot perform a synchronous GC cycle from within the
    // VM thread, so VM_GC_HeapInspection::collect() is a no-op for them. To
    // respect the _full_gc flag, a synchronous GC cycle is performed from the
    // caller thread in the prologue instead.
    Universe::heap()->collect(GCCause::_heap_inspection);
  }
  return VM_GC_Operation::doit_prologue();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker
                                               // or to _full_gc being false)
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect;
  WorkerThreads* workers = Universe::heap()->safepoint_workers();
  if (workers != nullptr) {
    // The GC provided a WorkerThreads to be used during a safepoint.
    // Can't run with more threads than provided by the WorkerThreads.
    const uint capped_parallel_thread_num = MIN2(_parallel_thread_num, workers->max_workers());
    WithActiveWorkers with_active_workers(workers, capped_parallel_thread_num);
    inspect.heap_inspection(_out, workers);
  } else {
    inspect.heap_inspection(_out, nullptr);
  }
}


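// Executed by the VM thread at a safepoint: try to satisfy the allocation that
// failed in the requesting thread, collecting if necessary. If the allocation
// still fails while the GC locker needs a GC, mark this operation as gc_locked
// so the requester can stall and retry once the GCLocker-induced GC has run.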
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(_result == nullptr || gch->is_in_reserved(_result), "result not in heap");

  if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

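// Perform a stop-the-world full collection of all generations up to and
// including _max_generation.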
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

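// A metaspace allocation failed in the requesting thread; record the request
// and report it through AllocTracer before the VM thread runs the
// collect-and-retry sequence in doit().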
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _result(nullptr), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != nullptr) {
      return;
    }
  }

#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap::heap()->start_concurrent_gc_for_metadata_allocation(_gc_cause);
    // For G1, expand the metaspace since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != nullptr) {
      return;
    }

    log_debug(gc)("G1 full GC for Metaspace");
  }
#endif

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding.  Could fail
  // and expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != nullptr) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != nullptr) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != nullptr) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(nullptr) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}