/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

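// Helpers around the DerivedPointerTable. Compiled (C2/JVMCI) code may hold
// pointers derived from a base oop; these are recorded while the collection
// runs and must be updated once objects have moved. The table is cleared and
// activated in prepare_collection(), deactivated after marking in collect(),
// and updated in complete_collection().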
static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

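// The full GC marks into the bitmap otherwise used by the next concurrent
// marking cycle; complete_collection() swaps the bitmaps and clears the new
// next bitmap again afterwards.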
G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->next_mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

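// Calculate the number of workers to use for the full collection. The count
// is the minimum of three limits: heap waste (each worker leaves on average
// half a region of waste), the WorkerPolicy heuristics, and the number of
// used regions (each worker needs at least one region to work on).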
uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->total_workers();
  // Only calculate the number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use the maximum.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2(max_wasted_regions_allowed * 2, 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally, consider the number of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lowest of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->update_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

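// Set up the per-worker data structures (markers, compaction points and task
// queues), the per-region liveness statistics and the region attribute table
// used during marking and compaction.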
G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool explicit_gc,
                                 bool clear_soft_refs,
                                 bool do_maximum_compaction) :
    _heap(heap),
    _scope(heap->g1mm(), explicit_gc, clear_soft_refs, do_maximum_compaction),
    _num_workers(calc_active_workers()),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(),
    _is_alive(this, heap->concurrent_mark()->next_mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_regions(), mtGC);
  for (uint j = 0; j < heap->max_regions(); j++) {
    _live_stats[j].clear();
  }

  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _preserved_marks_set.get(i), _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint();
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
  _region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

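// Applied to every heap region before marking: lets the heap prepare the
// region for full compaction and records the region's attribute (compacting,
// skip-compacting, skip-marking) in the collector's attribute table.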
class PrepareRegionsClosure : public HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(HeapRegion* hr) {
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  _heap->print_heap_before_gc();
  _heap->print_heap_regions();

  _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection(scope()->is_explicit_gc());

  _heap->gc_prologue(true);
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->enable_discovery();
  reference_processor()->setup_policy(scope()->should_clear_soft_refs());

  // We must save the marks of the currently locked biased monitors, because
  // marking does not preserve the mark words of biased objects.
  BiasedLocking::preserve_marks();

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

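// Run the four classic mark-compact phases: mark the live objects, prepare
// the compaction (determine the new locations), adjust all pointers to those
// locations, and finally move the objects.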
void G1FullCollector::collect() {
  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  phase2_prepare_compaction();

  phase3_adjust_pointers();

  phase4_do_compaction();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // Now that the objects have been moved and all pointers adjusted, we can
  // update the derived pointer table.
  update_derived_pointers();

  BiasedLocking::restore_marks();

  _heap->concurrent_mark()->swap_mark_bitmaps();
  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_next_bitmap(_heap->workers());

  _heap->prepare_heap_for_mutators();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection(scope()->heap_transition());
}

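// Classify each region before marking: free regions must already be Invalid
// (the table default), closed archive regions are tagged skip-marking, other
// pinned regions are tagged skip-compacting, and everything else will be
// compacted.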
void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
  if (hr->is_free()) {
    // Free regions are already Invalid (the table default); just verify.
    _region_attr_table.verify_is_invalid(hr->hrm_index());
  } else if (hr->is_closed_archive()) {
    _region_attr_table.set_skip_marking(hr->hrm_index());
  } else if (hr->is_pinned()) {
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

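// Proxy task handed to the reference processor: it plugs the full GC
// is-alive, keep-alive and follow-stack closures into the reference
// processing work, using the per-worker marker in the parallel case.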
class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
    uint old_active_mt_degree = reference_processor()->num_queues();
    reference_processor()->set_active_mt_degree(workers());
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");

    reference_processor()->set_active_mt_degree(old_active_mt_degree);
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(&_is_alive, purged_class);
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
  }
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
  G1FullGCPrepareTask task(this);
  run_task(&task);

  // If the parallel preparation did not free a single region, the last region
  // of every compaction queue still holds data. Plan to compact those regions
  // serially to avoid a premature OOM even though there is memory left.
  if (!task.has_freed_regions()) {
    task.prepare_serial_compaction();
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compaction, planned in phase 2, to avoid OOM when there are very
  // few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}

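// Restore the mark words that were preserved during marking (those carrying
// e.g. hash codes or lock state) and reclaim the preservation buffers.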
void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(AbstractGangTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only verify if both VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption_G1UseFullMarking);
}