/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->next_mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->total_workers();
  // Only calculate number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use max.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2), 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);
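  // Illustrative example (assumed numbers): with 2048 heap regions and
  // G1HeapWastePercent = 5, up to 102 wasted regions are tolerated; at half a
  // region of waste per worker that allows 204 workers before being capped by
  // max_worker_count.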

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the number of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lowest of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->update_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool explicit_gc,
                                 bool clear_soft_refs,
                                 bool do_maximum_compaction) :
    _heap(heap),
    _scope(heap->g1mm(), explicit_gc, clear_soft_refs, do_maximum_compaction),
    _num_workers(calc_active_workers()),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(),
    _is_alive(this, heap->concurrent_mark()->next_mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_regions(), mtGC);
  for (uint j = 0; j < heap->max_regions(); j++) {
    _live_stats[j].clear();
  }

  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _preserved_marks_set.get(i), _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint();
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
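  // The attribute table covers the whole reserved heap at HeapRegion
  // granularity, so every region gets one entry.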
  _region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

class PrepareRegionsClosure : public HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(HeapRegion* hr) {
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  _heap->print_heap_before_gc();
  _heap->print_heap_regions();

  _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection(scope()->is_explicit_gc());

  _heap->gc_prologue(true);
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->enable_discovery();
  reference_processor()->setup_policy(scope()->should_clear_soft_refs());

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

void G1FullCollector::collect() {
  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases
  deactivate_derived_pointers();

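  // Forwarding information is only needed from here until compaction is
  // done; the begin()/end() calls bracket that window around phases 2-4.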
  SlidingForwarding::begin();

  phase2_prepare_compaction();

  phase3_adjust_pointers();

  phase4_do_compaction();

  SlidingForwarding::end();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  BiasedLocking::restore_marks();

  _heap->concurrent_mark()->swap_mark_bitmaps();
  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_next_bitmap(_heap->workers());

  _heap->prepare_heap_for_mutators();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection(scope()->heap_transition());
}

void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
  if (hr->is_free()) {
    // Free regions are Invalid by default in the attribute table; just verify.
    _region_attr_table.verify_is_invalid(hr->hrm_index());
  } else if (hr->is_closed_archive()) {
    _region_attr_table.set_skip_marking(hr->hrm_index());
  } else if (hr->is_pinned()) {
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
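    // With the single-threaded reference processing model all work funnels
    // through marker 0; otherwise each worker uses its own marker.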
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
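    // Temporarily set the reference processor's MT degree to the number of
    // full GC workers; it is restored at the end of this block.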
    uint old_active_mt_degree = reference_processor()->num_queues();
    reference_processor()->set_active_mt_degree(workers());
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");

    reference_processor()->set_active_mt_degree(old_active_mt_degree);
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(&_is_alive, purged_class);
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
  }
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  // If the parallel preparation did not free any regions, fall back to
  // preparing a serial compaction to avoid OOM while there is still memory left.
  if (!task.has_freed_regions()) {
    task.prepare_serial_compaction();
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compaction to avoid OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}

void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(AbstractGangTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if both VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption_G1UseFullMarking);
}