/*
 * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

G1CMBitMap* G1FullCollector::mark_bitmap() {
  return _heap->concurrent_mark()->mark_bitmap();
}

ReferenceProcessor* G1FullCollector::reference_processor() {
  return _heap->ref_processor_stw();
}

uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->max_workers();
  // Only calculate the number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use max.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide the max number of workers. Each worker
  // will on average cause half a region of waste.
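  // E.g., with G1HeapWastePercent at its default of 5 and a 2048-region heap, up
  // to 102 wasted regions are allowed, so this limit alone would permit 204 workers.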
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2), 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the amount of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lower of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->set_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}

G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
                                 bool clear_soft_refs,
                                 bool do_maximal_compaction,
                                 G1FullGCTracer* tracer) :
    _heap(heap),
    _scope(heap->monitoring_support(), clear_soft_refs, do_maximal_compaction, tracer),
    _num_workers(calc_active_workers()),
    _has_compaction_targets(false),
    _has_humongous(false),
    _oop_queue_set(_num_workers),
    _array_queue_set(_num_workers),
    _preserved_marks_set(true),
    _serial_compaction_point(this),
    _humongous_compaction_point(this),
    _is_alive(this, heap->concurrent_mark()->mark_bitmap()),
    _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
    _humongous_compaction_regions(8),
    _always_subject_to_discovery(),
    _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
    _region_attr_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  _preserved_marks_set.init(_num_workers);
  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);

  _live_stats = NEW_C_HEAP_ARRAY(G1RegionMarkStats, _heap->max_regions(), mtGC);
  _compaction_tops = NEW_C_HEAP_ARRAY(HeapWord*, _heap->max_regions(), mtGC);
  for (uint j = 0; j < heap->max_regions(); j++) {
    _live_stats[j].clear();
    _compaction_tops[j] = nullptr;
  }

  for (uint i = 0; i < _num_workers; i++) {
    _markers[i] = new G1FullGCMarker(this, i, _preserved_marks_set.get(i), _live_stats);
    _compaction_points[i] = new G1FullGCCompactionPoint(this);
    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
  }
  _region_attr_table.initialize(heap->reserved(), HeapRegion::GrainBytes);
}

G1FullCollector::~G1FullCollector() {
  for (uint i = 0; i < _num_workers; i++) {
    delete _markers[i];
    delete _compaction_points[i];
  }

  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}
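
// Closure applied to every region before marking: prepares the region for full
// compaction and records in the region attribute table whether it is free,
// humongous (skip-compacting) or a regular compaction candidate.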
class PrepareRegionsClosure : public HeapRegionClosure {
  G1FullCollector* _collector;

public:
  PrepareRegionsClosure(G1FullCollector* collector) : _collector(collector) { }

  bool do_heap_region(HeapRegion* hr) {
    G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
    _collector->before_marking_update_attribute_table(hr);
    return false;
  }
};

void G1FullCollector::prepare_collection() {
  _heap->policy()->record_full_collection_start();

  // Verification needs the bitmap, so we should clear the bitmap only later.
  bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
  _heap->verify_before_full_collection();
  if (in_concurrent_cycle) {
    GCTraceTime(Debug, gc) debug("Clear Bitmap");
    _heap->concurrent_mark()->clear_bitmap(_heap->workers());
  }

  _heap->gc_prologue(true);
  _heap->retire_tlabs();
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->start_discovery(scope()->should_clear_soft_refs());

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}

void G1FullCollector::collect() {
  G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);

  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  SlidingForwarding::begin();

  phase2_prepare_compaction();

  if (has_compaction_targets()) {
    phase3_adjust_pointers();

    phase4_do_compaction();
  } else {
    // All regions have a high live ratio and thus will not be compacted.
    // The live ratio is only considered if do_maximal_compaction is false.
    log_info(gc, phases)("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
  }

  SlidingForwarding::end();

  phase5_reset_metadata();

  G1CollectedHeap::finish_codecache_marking_cycle();
}

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  // Need completely cleared claim bits for the next concurrent marking or full gc.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_bitmap(_heap->workers());

  _heap->prepare_for_mutator_after_full_collection();

  _heap->resize_all_tlabs();

  _heap->policy()->record_full_collection_end();
  _heap->gc_epilogue(true);

  _heap->verify_after_full_collection();

  _heap->print_heap_after_full_collection();
}

void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
  if (hr->is_free()) {
    _region_attr_table.set_free(hr->hrm_index());
  } else if (hr->is_humongous()) {
    // Humongous objects will never be moved in the "main" compaction phase, but
    // afterwards in a special phase if needed.
    _region_attr_table.set_skip_compacting(hr->hrm_index());
  } else {
    // Everything else should be compacted.
    _region_attr_table.set_compacting(hr->hrm_index());
  }
}

class G1FullGCRefProcProxyTask : public RefProcProxyTask {
  G1FullCollector& _collector;

public:
  G1FullGCRefProcProxyTask(G1FullCollector& collector, uint max_workers)
    : RefProcProxyTask("G1FullGCRefProcProxyTask", max_workers),
      _collector(collector) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    G1IsAliveClosure is_alive(&_collector);
    uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
    G1FullKeepAliveClosure keep_alive(_collector.marker(index));
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
  }
};

void G1FullCollector::phase1_mark_live_objects() {
  // Recursively traverse all live objects and mark them.
  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());

  {
    // Do the actual marking.
    G1FullGCMarkTask marking_task(this);
    run_task(&marking_task);
  }

  {
    uint old_active_mt_degree = reference_processor()->num_queues();
    reference_processor()->set_active_mt_degree(workers());
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer());
    // Process reference objects found during marking.
    ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
    G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues());
    const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
    scope()->tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
    assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");

    reference_processor()->set_active_mt_degree(old_active_mt_degree);
  }

  // Weak oops cleanup.
  {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Weak Processing", scope()->timer());
    WeakProcessor::weak_oops_do(_heap->workers(), &_is_alive, &do_nothing_cl, 1);
  }

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    {
      CodeCache::UnlinkingScope unloading_scope(&_is_alive);
      // Unload classes and purge the SystemDictionary.
      bool unloading_occurred = SystemDictionary::do_unloading(scope()->timer());
      _heap->complete_cleaning(unloading_occurred);
    }
    CodeCache::flush_unlinked_nmethods();
  }

  {
    GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
  }
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

  phase2a_determine_worklists();

  if (!has_compaction_targets()) {
    return;
  }

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase).
  // Prepare to maximally compact the tail regions of the compaction queues serially.
  if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
    phase2c_prepare_serial_compaction();

    if (scope()->do_maximal_compaction() &&
        has_humongous() &&
        serial_compaction_point()->has_regions()) {
      phase2d_prepare_humongous_compaction();
    }
  }
}

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

uint G1FullCollector::truncate_parallel_cps() {
  uint lowest_current = UINT_MAX;
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
    }
  }

  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      cp->remove_at_or_above(lowest_current);
    }
  }
  return lowest_current;
}

template <bool ALT_FWD>
void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point, we know that after parallel compaction there will be regions that
  // are partially compacted into. Thus, the last compaction region of each
  // compaction queue still has space in it. We try to re-compact these regions
  // serially to avoid a premature OOM when the mutator wants to allocate the first
  // eden region after gc.

  // For maximum compaction, we need to re-prepare all objects above the lowest
  // region among the current regions for all thread compaction points. It may
  // happen that due to the uneven distribution of objects to parallel threads, holes
  // have been created as threads compact to different target regions between the
  // lowest and the highest region in the tails of the compaction points.
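  // truncate_parallel_cps() below finds the lowest such tail region across all
  // parallel compaction points and removes it and everything above it from those
  // queues, so the serial pass can re-plan that range.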

  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);

  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
    if (is_compaction_target(i)) {
      HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}

void G1FullCollector::phase2c_prepare_serial_compaction() {
  if (UseAltGCForwarding) {
    phase2c_prepare_serial_compaction_impl<true>();
  } else {
    phase2c_prepare_serial_compaction_impl<false>();
  }
}

template <bool ALT_FWD>
void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(serial_cp->has_regions(), "Sanity!");

  uint last_serial_target = serial_cp->current_region()->hrm_index();
  uint region_index = last_serial_target + 1;
  uint max_reserved_regions = _heap->max_reserved_regions();

  G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();

  while (region_index < max_reserved_regions) {
    HeapRegion* hr = _heap->region_at_or_null(region_index);

    if (hr == nullptr) {
      region_index++;
      continue;
    } else if (hr->is_starts_humongous()) {
      uint num_regions = humongous_cp->forward_humongous<ALT_FWD>(hr);
      region_index += num_regions; // Skip over the continues humongous regions.
      continue;
    } else if (is_compaction_target(region_index)) {
      // Add the region to the humongous compaction point.
      humongous_cp->add(hr);
    }
    region_index++;
  }
}

void G1FullCollector::phase2d_prepare_humongous_compaction() {
  if (UseAltGCForwarding) {
    phase2d_prepare_humongous_compaction_impl<true>();
  } else {
    phase2d_prepare_humongous_compaction_impl<false>();
  }
}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compact to avoid OOM when very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }

  if (!_humongous_compaction_regions.is_empty()) {
    assert(scope()->do_maximal_compaction(), "Only compact humongous during maximal compaction");
    task.humongous_compaction();
  }
}

void G1FullCollector::phase5_reset_metadata() {
  // Clear region metadata that is invalid after GC for all regions.
  GCTraceTime(Info, gc, phases) info("Phase 5: Reset Metadata", scope()->timer());
  G1FullGCResetMetadataTask task(this);
  run_task(&task);
}

void G1FullCollector::restore_marks() {
  _preserved_marks_set.restore(_heap->workers());
  _preserved_marks_set.reclaim();
}

void G1FullCollector::run_task(WorkerTask* task) {
  _heap->workers()->run_task(task, _num_workers);
}

void G1FullCollector::verify_after_marking() {
  if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) {
    // Only do verification if VerifyDuringGC and G1VerifyFull are set.
    return;
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTableDeactivate dpt_deact;
#endif
  _heap->prepare_for_verify();
  // Note: we can verify only the heap here. When an object is
  // marked, the previous value of the mark word (including
  // identity hash values, ages, etc) is preserved, and the mark
  // word is set to markWord::marked_value - effectively removing
  // any hash values from the mark word. These hash values are
  // used when verifying the dictionaries and so removing them
  // from the mark word can make verification of the dictionaries
  // fail. At the end of the GC, the original mark word values
  // (including hash values) are restored to the appropriate
  // objects.
  GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
  _heap->verify(VerifyOption::G1UseFullMarking);
}