/*
 * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->max_workers();
  // Only calculate number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use max.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2), 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the amount of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lower of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->set_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool explicit_gc,
                                 bool clear_soft_refs,
                                 bool do_maximal_compaction,
                                 G1FullGCTracer* tracer) :
    _heap(heap),
    _scope(heap->monitoring_support(), explicit_gc, clear_soft_refs, do_maximal_compaction, tracer),
    _num_workers(calc_active_workers()),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(this),
    _is_alive(this, heap->concurrent_mark()->mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

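  // Per-region marking statistics and compaction tops, sized for the maximum
  // number of regions and indexed by region index.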
  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_regions(), mtGC);
  _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_regions(), mtGC);
  for (uint j = 0; j < heap->max_regions(); j++) {
    _live_stats[j].clear();
    _compaction_tops[j] = nullptr;
  }

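  // Create the per-worker marking and compaction state, and register each
  // marker's mark stacks with the task queue sets so work can be shared
  // between workers during parallel marking.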
  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _preserved_marks_set.get(i), _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint(this);
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
  _region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

class PrepareRegionsClosure : public HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(HeapRegion* hr) {
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  // Verification needs the bitmap, so we should clear the bitmap only later.
  bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection(scope()->is_explicit_gc());
  if (in_concurrent_cycle) {
    GCTraceTime(Debug, gc) debug("Clear Bitmap");
    _heap->concurrent_mark()->clear_bitmap(_heap->workers());
  }

  _heap->gc_prologue(true);
  _heap->retire_tlabs();
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->start_discovery(scope()->should_clear_soft_refs());

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

void G1FullCollector::collect() {
  G1CollectedHeap::start_codecache_marking_cycle_if_inactive();

  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases
  deactivate_derived_pointers();

  phase2_prepare_compaction();

  phase3_adjust_pointers();

  phase4_do_compaction();

  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  // Need completely cleared claim bits for the next concurrent marking or full gc.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_bitmap(_heap->workers());

  _heap->prepare_heap_for_mutators();

  _heap->resize_all_tlabs();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();
}

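// Classify each region before marking: free regions are recorded as such,
// closed archive regions are excluded from marking, pinned regions are
// excluded from compaction, and everything else is a compaction candidate.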
void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
  if (hr->is_free()) {
    _region_attr_table.set_free(hr->hrm_index());
  } else if (hr->is_closed_archive()) {
    _region_attr_table.set_skip_marking(hr->hrm_index());
  } else if (hr->is_pinned()) {
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
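    // Serial reference processing always uses marker 0; multi-threaded
    // processing uses the marker that belongs to this worker.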
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
    uint old_active_mt_degree = reference_processor()->num_queues();
    reference_processor()->set_active_mt_degree(workers());
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");

    reference_processor()->set_active_mt_degree(old_active_mt_degree);
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    CodeCache::UnloadingScope unloading_scope(&_is_alive);
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(purged_class);
  }

  scope()->tracer()->report_object_count_after_gc(&_is_alive);
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

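  // Reset the sliding forwarding table before phase 2b records new
  // forwarding information for the objects that will be moved.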
  _heap->forwarding()->clear();

  phase2a_determine_worklists();

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase). Prepare to
  // maximally compact the tail regions of the compaction queues serially.
  // TODO: Disabled for now because it violates sliding-forwarding assumption.
  // if (!has_free_compaction_targets) {
  //   phase2c_prepare_serial_compaction();
  // }
}

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

void G1FullCollector::phase2c_prepare_serial_compaction() {
  ShouldNotReachHere(); // Disabled in Lilliput.
  //GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point we know that after parallel compaction there will be no
  // completely free regions. That means that the last region of
  // all compaction queues still have data in them. We try to compact
  // these regions in serial to avoid a premature OOM when the mutator wants
  // to allocate the first eden region after gc.
  /*
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      serial_compaction_point()->add(cp->remove_last());
    }
  }
  */

  // Update the forwarding information for the regions in the serial
  // compaction point.
  /*
  G1FullGCCompactionPoint* cp = serial_compaction_point();
  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
    HeapRegion* current = *it;
    if (!cp->is_initialized()) {
      // Initialize the compaction point. Nothing more is needed for the first heap region
      // since it is already prepared for compaction.
      cp->initialize(current);
    } else {
      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
      G1SerialRePrepareClosure re_prepare(cp, current);
      set_compaction_top(current, current->bottom());
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  cp->update();
  */
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compact to avoid OOM when very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}

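// Reinstall the mark words (identity hashes, ages, etc.) that were saved
// during marking, and release the preserved-marks storage.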
void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(WorkerTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if both VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption::G1UseFullMarking);