/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

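// The DerivedPointerTable records derived pointers found in compiled frames so
// they can be updated after their base objects have moved. Derived pointers only
// occur in C2 or JVMCI compiled code, hence the COMPILER2_OR_JVMCI guards below.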
static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->max_workers();
  // Only calculate the number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use the maximum.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the maximum number of workers. Each
  // worker will on average cause half a region of waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2(max_wasted_regions_allowed * 2, 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);
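  // Example with assumed values: a 2048-region heap with G1HeapWastePercent=5
  // allows 102 wasted regions, so up to 204 workers pass this criterion before
  // being capped at max_worker_count.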

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the number of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lowest of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->set_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

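// The collector is constructed at a safepoint; the per-worker data structures
// below (markers, task queues and compaction points) are sized using the worker
// count calculated above.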
G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool clear_soft_refs,
                                 bool do_maximal_compaction,
                                 G1FullGCTracer* tracer) :
    _heap(heap),
    _scope(heap->monitoring_support(), clear_soft_refs, do_maximal_compaction, tracer),
    _num_workers(calc_active_workers()),
    _has_compaction_targets(false),
    _has_humongous(false),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(this, nullptr),
    _humongous_compaction_point(this, nullptr),
    _is_alive(this, heap->concurrent_mark()->mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _humongous_compaction_regions(8),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_regions(), mtGC);
  _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_regions(), mtGC);
  for (uint j = 0; j < heap->max_regions(); j++) {
    _live_stats[j].clear();
    _compaction_tops[j] = nullptr;
  }

  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint(this, _preserved_marks_set.get(i));
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
  _serial_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
  _humongous_compaction_point.set_preserved_stack(_preserved_marks_set.get(0));
  _region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }

  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}

class PrepareRegionsClosure : public HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(HeapRegion* hr) {
    hr->prepare_for_full_gc();
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  // Verification needs the bitmap, so we should clear the bitmap only later.
  bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection();
  if (in_concurrent_cycle) {
    GCTraceTime(Debug, gc) debug("Clear Bitmap");
    _heap->concurrent_mark()->clear_bitmap(_heap->workers());
  }

  _heap->gc_prologue(true);
  _heap->retire_tlabs();
  _heap->flush_region_pin_cache();
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->start_discovery(scope()->should_clear_soft_refs());

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

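// Execute the full collection: mark live objects, prepare compaction, adjust
// pointers, compact the heap and finally reset region metadata.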
void G1FullCollector::collect() {
  G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);

  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  SlidingForwarding::begin();

  phase2_prepare_compaction();

  if (has_compaction_targets()) {
    phase3_adjust_pointers();

    phase4_do_compaction();
  } else {
    // All regions have a high live ratio and will therefore not be compacted.
    // The live ratio is only considered if do_maximal_compaction is false.
    log_info(gc, phases)("No regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
  }

  SlidingForwarding::end();

  phase5_reset_metadata();

  G1CollectedHeap::finish_codecache_marking_cycle();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // Now that pointers have been adjusted and objects have been moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  // Need completely cleared claim bits for the next concurrent marking or full gc.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_bitmap(_heap->workers());

  _heap->prepare_for_mutator_after_full_collection();

  _heap->resize_all_tlabs();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection();
}

void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
  if (hr->is_free()) {
    _region_attr_table.set_free(hr->hrm_index());
  } else if (hr->is_humongous() || hr->has_pinned_objects()) {
    // Humongous objects or pinned regions will never be moved in the "main"
    // compaction phase, but non-pinned regions might be moved afterwards in
    // a special phase.
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

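// Proxy task handed to the reference processor: for each worker it wires up the
// full-GC is-alive, keep-alive and follow-stack closures so that discovered
// references are processed either serially or by multiple workers.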
class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector &collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
    uint old_active_mt_degree = reference_processor()->num_queues();
    reference_processor()->set_active_mt_degree(workers());
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");

    reference_processor()->set_active_mt_degree(old_active_mt_degree);
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Flush Mark Stats Cache", scope()->timer());
    for (uint i = 0; i < workers(); i++) {
      marker(i)->flush_mark_stats_cache();
    }
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    _heap->unload_classes_and_code("Phase 1: Class Unloading and Cleanup", &_is_alive, scope()->timer());
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
  }
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

  phase2a_determine_worklists();

  if (!has_compaction_targets()) {
    return;
  }

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase). Prepare to
  // maximally compact the tail regions of the compaction queues serially.
  if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
    phase2c_prepare_serial_compaction();

    if (scope()->do_maximal_compaction() &&
        has_humongous() &&
        serial_compaction_point()->has_regions()) {
      phase2d_prepare_humongous_compaction();
    }
  }
}

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

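// Find the lowest-indexed region that is still the current compaction target of
// any worker, remove that region and everything above it from all parallel
// compaction points, and return its index. Those regions are re-prepared by the
// serial compaction point below.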
uint G1FullCollector::truncate_parallel_cps() {
  uint lowest_current = UINT_MAX;
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
    }
  }

  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      cp->remove_at_or_above(lowest_current);
    }
  }
  return lowest_current;
}

template <bool ALT_FWD>
void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point, we know that after parallel compaction there will be regions that
  // are partially compacted into. Thus, the last compaction region of each
  // compaction queue still has space in it. We try to re-compact these regions
  // in serial to avoid a premature OOM when the mutator wants to allocate the first
  // eden region after gc.

  // For maximum compaction, we need to re-prepare all objects above the lowest
  // region among the current regions of all thread compaction points. It may
  // happen that, due to the uneven distribution of objects to parallel threads,
  // holes have been created as threads compact to different target regions between
  // the lowest and the highest region in the tails of the compaction points.

  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);

  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
    if (is_compaction_target(i)) {
      HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}

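// Dispatch on UseAltGCForwarding so the preparation closures are specialized for
// the forwarding pointer encoding in use; the ALT_FWD variant uses the alternative,
// compact encoding provided by SlidingForwarding.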
void G1FullCollector::phase2c_prepare_serial_compaction() {
  if (UseAltGCForwarding) {
    phase2c_prepare_serial_compaction_impl<true>();
  } else {
    phase2c_prepare_serial_compaction_impl<false>();
  }
}

template <bool ALT_FWD>
void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(serial_cp->has_regions(), "Sanity!");

  uint last_serial_target = serial_cp->current_region()->hrm_index();
  uint region_index = last_serial_target + 1;
  uint max_reserved_regions = _heap->max_reserved_regions();

  G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();

  while (region_index < max_reserved_regions) {
    HeapRegion* hr = _heap->region_at_or_null(region_index);

    if (hr == nullptr) {
      region_index++;
      continue;
    } else if (hr->is_starts_humongous()) {
      size_t obj_size = cast_to_oop(hr->bottom())->size();
      uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size);
      // Even during last-ditch compaction we should not move pinned humongous objects.
      if (!hr->has_pinned_objects()) {
        humongous_cp->forward_humongous<ALT_FWD>(hr);
      }
      region_index += num_regions; // Advance over all humongous regions.
      continue;
    } else if (is_compaction_target(region_index)) {
      assert(!hr->has_pinned_objects(), "pinned regions should not be compaction targets");
      // Add the region to the humongous compaction point.
      humongous_cp->add(hr);
    }
    region_index++;
  }
}

void G1FullCollector::phase2d_prepare_humongous_compaction() {
  if (UseAltGCForwarding) {
    phase2d_prepare_humongous_compaction_impl<true>();
  } else {
    phase2d_prepare_humongous_compaction_impl<false>();
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serially compact to avoid OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }

  if (!_humongous_compaction_regions.is_empty()) {
    assert(scope()->do_maximal_compaction(), "Only compact humongous during maximal compaction");
    task.humongous_compaction();
  }
}

void G1FullCollector::phase5_reset_metadata() {
  // Clear region metadata that is invalid after GC for all regions.
  GCTraceTime(Info, gc, phases) info("Phase 5: Reset Metadata", scope()->timer());
  G1FullGCResetMetadataTask task(this);
  run_task(&task);
}

void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(WorkerTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if both VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc.) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption::G1UseFullMarking);
}