/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_G1GC

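// VM_GC_Sync_Operation brackets the VM operation with the Heap_lock: the
// prologue (run in the requesting thread) takes the lock, and the epilogue
// releases it after the operation completes, so that heap state and the
// collection counters are read consistently by concurrent requesters.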
bool VM_GC_Sync_Operation::doit_prologue() {
  Heap_lock->lock();
  return true;
}

void VM_GC_Sync_Operation::doit_epilogue() {
  Heap_lock->unlock();
}

void VM_Verify::doit() {
  Universe::heap()->prepare_for_verify();
  Universe::verify();
}

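// Once the operation is done, reset the soft-ref clearing policy to its
// default, so a decision to clear all soft references applies to one
// collection only.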
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->soft_ref_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// call it here so that it lives in only one file. New probes can no longer
// be created for the other file, because the dtrace probes have to remain
// stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(full);
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

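// The prologue runs in the requesting thread, before the safepoint. On
// success, the Heap_lock taken here stays held across the safepoint and is
// released only in doit_epilogue(); if the operation is skipped, the lock
// is released immediately.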
bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // VM initialization needs to be completed before a GC can be handled.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  VM_GC_Sync_Operation::doit_prologue();

  // Check the collection counts: skip if a concurrent request has already
  // performed the collection.
  if (skip_operation()) {
    // Skip this collection and release the Heap_lock taken above.
    Heap_lock->unlock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  // Clean up old interpreter OopMap entries that were replaced
  // during the GC thread root traversal.
  OopMapCache::cleanup_old_entries();
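  // The GC may have discovered pending references; wake up any thread
  // waiting on the Heap_lock for the reference pending list (the reference
  // handler thread waits there in JVM_WaitForReferencePendingList()).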
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  VM_GC_Sync_Operation::doit_epilogue();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in it (see CR 6944195). Issue a suitable
      // warning in that case and do not retry the collection. The latter is a
      // subtle point: even a failed attempt to GC will induce one in the
      // future, which we want to avoid here because the GC is of value to us
      // only if it happens now, not at some point in the eventual future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect;
  inspect.heap_inspection(_out, _parallel_thread_num);
}


void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(_result == NULL || gch->is_in_reserved(_result), "result not in heap");

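  // If the allocation still failed and the GC locker is the reason, mark
  // the operation as gc-locked; the requesting thread can then stall until
  // the JNI critical sections have exited and retry the allocation.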
  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

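// A typical requester of a full collection records the collection counts
// under the Heap_lock before scheduling the operation; this is what lets
// skip_operation() detect that a concurrent request already did the work.
// A minimal sketch (see e.g. GenCollectedHeap::collect for a real caller):
//
//   uint gc_count_before, full_gc_count_before;
//   {
//     MutexLocker ml(Heap_lock);
//     gc_count_before = Universe::heap()->total_collections();
//     full_gc_count_before = Universe::heap()->total_full_collections();
//   }
//   VM_GenCollectFull op(gc_count_before, full_gc_count_before,
//                        GCCause::_java_lang_system_gc, GenCollectedHeap::OldGen);
//   VMThread::execute(&op);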
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

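// The allocation is retried with progressively more aggressive measures:
// re-attempt the allocation (another thread's GC may have freed space), then
// for G1 a concurrent cycle plus expansion, then a GC without clearing soft
// references, then metaspace expansion, and finally a GC that clears soft
// references. The first step that yields a non-NULL result wins.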
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
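  // (When MetadataAllocationFailALot is set, this retry is deliberately
  // skipped so that the allocation-failure GC paths below are exercised.)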
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap::heap()->start_concurrent_gc_for_metadata_allocation(_gc_cause);
    // For G1, expand the metaspace since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("G1 full GC for Metaspace");
  }
#endif

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After the GC, try to allocate without expanding. This could fail,
  // in which case expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If the allocation is still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the amount of
  // the expansion. This should work unless there really is no more space,
  // or MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForCodeCacheAllocation::VM_CollectForCodeCacheAllocation(uint gc_count_before,
                                                                   uint full_gc_count_before,
                                                                   GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true) {
}

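// Unlike the metadata path above, there is no retry ladder here: a single
// collection with GCCause::_codecache_GC_threshold is requested, and code
// cache space is expected to be reclaimed through the class and nmethod
// unloading performed during that GC.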
void VM_CollectForCodeCacheAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  log_debug(gc)("Full GC for CodeCache");

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_codecache_GC_threshold);

  log_debug(gc)("After GC for CodeCache");

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

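// VM_CollectForAllocation is the base class of the allocation-failure
// operations above. AllocTracer forwards the event to JFR; a _word_size of
// zero indicates the operation was not caused by a specific allocation, so
// no event is sent in that case.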
VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}