/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

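// Helpers for the DerivedPointerTable, which records base/derived pointer
// pairs found in compiled frames so that derived pointers can be fixed up
// after objects have moved. They are no-ops unless C2 or JVMCI is built in.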
static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->max_workers();
  // Only calculate the number of workers if UseDynamicNumberOfGCThreads
  // is enabled; otherwise use the maximum.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_committed_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2), 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the number of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lower of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->set_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

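// Set up the state for this full collection: determine the number of workers,
// create one marker and one compaction point per worker, and allocate
// per-region live statistics and compaction tops for the whole heap.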
G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool clear_soft_refs,
                                 bool do_maximal_compaction,
                                 G1FullGCTracer* tracer) :
    _heap(heap),
    _scope(heap->monitoring_support(), clear_soft_refs, do_maximal_compaction, tracer),
    _num_workers(calc_active_workers()),
    _has_compaction_targets(false),
    _has_humongous(false),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(this, nullptr),
    _humongous_compaction_point(this, nullptr),
    _is_alive(this, heap->concurrent_mark()->mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _humongous_compaction_regions(8),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_num_regions(), mtGC);
  _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_num_regions(), mtGC);
  for (uint j = 0; j < heap->max_num_regions(); j++) {
    _live_stats[j].clear();
    _compaction_tops[j] = nullptr;
  }

  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint(this, _preserved_marks_set.get(i));
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
  _serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
  _humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
  _region_attr_table.initialize(heap->reserved(), G1HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }

  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

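// Closure applied to every region before marking: it resets per-region state
// for the full GC and classifies the region in the collector's attribute table.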
class PrepareRegionsClosure : public G1HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(G1HeapRegion* hr) {
    hr->prepare_for_full_gc();
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  // Verification needs the bitmap, so we should clear the bitmap only later.
  bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection();
  if (in_concurrent_cycle) {
    GCTraceTime(Debug, gc) debug("Clear Bitmap");
    _heap->concurrent_mark()->clear_bitmap(_heap->workers());
  }

  _heap->gc_prologue(true);
  _heap->retire_tlabs();
  _heap->flush_region_pin_cache();
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->start_discovery(scope()->should_clear_soft_refs());

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

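// Run the collection itself: mark live objects, prepare the compaction queues,
// and, if any regions were selected for compaction, adjust pointers and compact
// the heap before resetting region metadata.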
void G1FullCollector::collect() {
  G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);

  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  phase2_prepare_compaction();

  if (has_compaction_targets()) {
    phase3_adjust_pointers();

    phase4_do_compaction();
  } else {
    // All regions have a high live ratio and thus will not be compacted.
    // The live ratio is only considered if do_maximal_compaction is false.
    log_info(gc, phases)("No regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
  }

  phase5_reset_metadata();

  G1CollectedHeap::finish_codecache_marking_cycle();
}

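// Post-compaction work: restore preserved marks, update derived pointers and
// get the heap and policy ready for mutator execution again.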
void G1FullCollector::complete_collection(size_t allocation_word_size) {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and the objects moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  // Need completely cleared claim bits for the next concurrent marking or full gc.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_bitmap(_heap->workers());

  _heap->prepare_for_mutator_after_full_collection(allocation_word_size);

  _heap->resize_all_tlabs();

  _heap->young_regions_cset_group()->clear();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection();
}

void G1FullCollector::before_marking_update_attribute_table(G1HeapRegion* hr) {
  if (hr->is_free()) {
    _region_attr_table.set_free(hr->hrm_index());
  } else if (hr->is_humongous() || hr->has_pinned_objects()) {
    // Humongous or pinned regions will never be moved in the "main" compaction
    // phase, but non-pinned humongous regions might be moved afterwards in a
    // special phase.
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

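// Proxy task handed to the reference processor; it wires each worker's marker
// (keep-alive and follow-stack closures) into reference processing.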
class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, _heap->workers(), pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Flush Mark Stats Cache", scope()->timer());
    for (uint i = 0; i < workers(); i++) {
      marker(i)->flush_mark_stats_cache();
    }
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    _heap->unload_classes_and_code("Phase 1: Class Unloading and Cleanup", &_is_alive, scope()->timer());
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
  }
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

  phase2a_determine_worklists();

  if (!has_compaction_targets()) {
    return;
  }

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase). Prepare to
  // maximally compact the tail regions of the compaction queues serially.
  if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
    phase2c_prepare_serial_compaction();

    if (scope()->do_maximal_compaction() &&
        has_humongous() &&
        serial_compaction_point()->has_regions()) {
      phase2d_prepare_humongous_compaction();
    }
  }
}

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

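// Find the lowest "current" region index over all parallel compaction points
// and remove that region and everything above it from their queues, so that
// the caller can re-prepare those tail regions for serial compaction.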
uint G1FullCollector::truncate_parallel_cps() {
  uint lowest_current = UINT_MAX;
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
    }
  }

  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      cp->remove_at_or_above(lowest_current);
    }
  }
  return lowest_current;
}

void G1FullCollector::phase2c_prepare_serial_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point we know that after parallel compaction there will be regions
  // that are only partially compacted into. Thus, the last compaction region of
  // each compaction queue still has space in it. We try to re-compact these
  // regions serially to avoid a premature OOM when the mutator wants to allocate
  // the first eden region after gc.

  // For maximal compaction we need to re-prepare all objects above the lowest
  // region among the current regions of all thread compaction points. Due to the
  // uneven distribution of objects to parallel threads, holes may have been
  // created as threads compact to different target regions between the lowest
  // and the highest region in the tails of the compaction points.

  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_num_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  G1HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);

  for (uint i = start_serial + 1; i < _heap->max_num_regions(); i++) {
    if (is_compaction_target(i)) {
      G1HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}

void G1FullCollector::phase2d_prepare_humongous_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(serial_cp->has_regions(), "Sanity!");

  uint last_serial_target = serial_cp->current_region()->hrm_index();
  uint region_index = last_serial_target + 1;
  uint max_num_regions = _heap->max_num_regions();

  G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();

  while (region_index < max_num_regions) {
    G1HeapRegion* hr = _heap->region_at_or_null(region_index);

    if (hr == nullptr) {
      region_index++;
      continue;
    } else if (hr->is_starts_humongous()) {
      size_t obj_size = cast_to_oop(hr->bottom())->size();
      uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size);
      // Even during last-ditch compaction we should not move pinned humongous objects.
      if (!hr->has_pinned_objects()) {
        humongous_cp->forward_humongous(hr);
      }
      region_index += num_regions; // Advance over all humongous regions.
      continue;
    } else if (is_compaction_target(region_index)) {
      assert(!hr->has_pinned_objects(), "pinned regions should not be compaction targets");
      // Add the region to the humongous compaction point.
      humongous_cp->add(hr);
    }
    region_index++;
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Compact serially to avoid OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }

  if (!_humongous_compaction_regions.is_empty()) {
    assert(scope()->do_maximal_compaction(), "Only compact humongous during maximal compaction");
    task.humongous_compaction();
  }
}

void G1FullCollector::phase5_reset_metadata() {
  // Clear region metadata that is invalid after GC for all regions.
  GCTraceTime(Info, gc, phases) info("Phase 5: Reset Metadata", scope()->timer());
  G1FullGCResetMetadataTask task(this);
  run_task(&task);
}

void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(WorkerTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption::G1UseFullMarking);
}