/*
 * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "compiler/oopMap.hpp"
#include "cppstdlib/new.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

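// Serial subtask that flushes the statistics collected in the per-worker
// G1ParScanThreadStates into the global statistics.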
class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
  G1ParScanThreadStateSet* _per_thread_states;

public:
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }

  double worker_cost() const override { return 1.0; }

  void do_work(uint worker_id) override { _per_thread_states->flush_stats(); }
};

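// Serial subtask that recalculates the heap's used bytes after the pause. This
// is cheap unless evacuation failed, in which case the used bytes of the
// failed regions must be reconsidered.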
class G1PostEvacuateCollectionSetCleanupTask1::RecalculateUsedTask : public G1AbstractSubTask {
  bool _evacuation_failed;
  bool _allocation_failed;

public:
  RecalculateUsedTask(bool evacuation_failed, bool allocation_failed) :
    G1AbstractSubTask(G1GCPhaseTimes::RecalculateUsed),
    _evacuation_failed(evacuation_failed),
    _allocation_failed(allocation_failed) { }

  double worker_cost() const override {
    // If there is no evacuation failure, the work to perform is minimal.
    return _evacuation_failed ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap::heap()->update_used_after_gc(_evacuation_failed);
    if (_allocation_failed) {
      // Reset the G1GCAllocationFailureALot counters and flags.
      G1CollectedHeap::heap()->allocation_failure_injector()->reset();
    }
  }
};

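// Serial subtask that samples the memory occupancy of the card sets of the
// current collection set candidate groups and publishes the total to the heap.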
class G1PostEvacuateCollectionSetCleanupTask1::SampleCollectionSetCandidatesTask : public G1AbstractSubTask {
public:
  SampleCollectionSetCandidatesTask() : G1AbstractSubTask(G1GCPhaseTimes::SampleCollectionSetCandidates) { }

  static bool should_execute() {
    return G1CollectedHeap::heap()->should_sample_collection_set_candidates();
  }

  double worker_cost() const override {
    return should_execute() ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1MonotonicArenaMemoryStats total;
    G1CollectionSetCandidates* candidates = g1h->collection_set()->candidates();
    for (G1CSetCandidateGroup* gr : candidates->from_marking_groups()) {
      total.add(gr->card_set_memory_stats());
    }

    for (G1CSetCandidateGroup* gr : candidates->retained_groups()) {
      total.add(gr->card_set_memory_stats());
    }
    g1h->set_collection_set_candidates_stats(total);
  }
};

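// Restores regions in the collection set that failed evacuation: clears the
// self-forwarding pointers of the objects that remain live in place and fills
// the space between them with dead (filler) objects, updating the BOT along
// the way. Every region is split into chunks that workers claim and process
// independently to spread the work evenly.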
class G1PostEvacuateCollectionSetCleanupTask1::RestoreEvacFailureRegionsTask : public G1AbstractSubTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  class PhaseTimesStat {
    static constexpr G1GCPhaseTimes::GCParPhases phase_name =
      G1GCPhaseTimes::RemoveSelfForwards;

    G1GCPhaseTimes* _phase_times;
    uint _worker_id;
    Ticks _start;

  public:
    PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
      _phase_times(phase_times),
      _worker_id(worker_id),
      _start(Ticks::now()) { }

    ~PhaseTimesStat() {
      _phase_times->record_or_add_time_secs(phase_name,
                                            _worker_id,
                                            (Ticks::now() - _start).seconds());
    }

    void register_empty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
    }

    void register_nonempty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardChunksNum);
    }

    void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   num_marked_obj,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

      size_t marked_bytes = marked_words * HeapWordSize;
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   marked_bytes,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
    }
  };

  // Fill the memory area from start to end with filler objects, and update the BOT
  // accordingly. Since we clear and use the bitmap for marking objects that failed
  // evacuation, there is no other work to be done there.
  static size_t zap_dead_objects(G1HeapRegion* hr, HeapWord* start, HeapWord* end) {
    assert(start <= end, "precondition");
    if (start == end) {
      return 0;
    }

    hr->fill_range_with_dead_objects(start, end);
    return pointer_delta(end, start);
  }

  static void update_garbage_words_in_hr(G1HeapRegion* hr, size_t garbage_words) {
    if (garbage_words != 0) {
      hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
    }
  }

  static void prefetch_obj(HeapWord* obj_addr) {
    Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
  }

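  // Claim the chunk by atomically setting its bit in the claim bitmap; only
  // the worker that flips the bit processes the chunk.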
  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

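  // Process a single chunk: walk all marked (i.e. live, evacuation-failed)
  // objects in it, removing their self-forwards and updating the BOT, and zap
  // the unmarked gaps between them with filler objects.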
  void process_chunk(uint worker_id, uint chunk_idx) {
    PhaseTimesStat stat(_g1h->phase_times(), worker_id);

    G1CMBitMap* bitmap = _cm->mark_bitmap();
    const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
    G1HeapRegion* hr = _g1h->region_at(region_idx);

    HeapWord* hr_bottom = hr->bottom();
    HeapWord* hr_top = hr->top();
    HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

    assert(chunk_start < hr->end(), "inv");
    if (chunk_start >= hr_top) {
      return;
    }

    HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
    HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

    size_t garbage_words = 0;

    if (chunk_start == hr_bottom) {
      // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
      garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
    }

    if (first_marked_addr >= chunk_end) {
      stat.register_empty_chunk();
      update_garbage_words_in_hr(hr, garbage_words);
      return;
    }

    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_self_forwarded(), "must be self-forwarded");
        obj->unset_self_forwarded();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);
  }

public:
  RestoreEvacFailureRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::RestoreEvacuationFailedRegions),
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _evac_failure_regions(evac_failure_regions),
    _chunk_bitmap(mtGC) {

    _num_evac_fail_regions = _evac_failure_regions->num_regions_evac_failed();
    _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region_for_scan();

    _chunk_size = static_cast<uint>(G1HeapRegion::GrainWords / _num_chunks_per_region);

    log_debug(gc, ergo)("Initializing removal of self forwards with %u chunks per region",
                        _num_chunks_per_region);

    _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
  }

  double worker_cost() const override {
    assert(_evac_failure_regions->has_regions_evac_failed(), "Should not call this if there were no evacuation failures");

    double workers_per_region = (double)G1CollectedHeap::get_chunks_per_region_for_scan() / G1RestoreRetainedRegionChunksPerWorker;
    return workers_per_region * _evac_failure_regions->num_regions_evac_failed();
  }

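  // Give each worker a different starting offset into the chunk array and let
  // it wrap around, so that workers start on disjoint ranges and contend on
  // chunk claims only when they catch up with each other.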
  void do_work(uint worker_id) override {
    const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
    const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
    const uint start_chunk_idx = worker_id * total_chunks / total_workers;

    for (uint i = 0; i < total_chunks; i++) {
      const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
      if (claim_chunk(chunk_idx)) {
        process_chunk(worker_id, chunk_idx);
      }
    }
  }
};

G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 1", G1CollectedHeap::heap()->phase_times())
{
  bool evac_failed = evac_failure_regions->has_regions_evac_failed();
  bool alloc_failed = evac_failure_regions->has_regions_alloc_failed();

  add_serial_task(new MergePssTask(per_thread_states));
  add_serial_task(new RecalculateUsedTask(evac_failed, alloc_failed));
  if (SampleCollectionSetCandidatesTask::should_execute()) {
    add_serial_task(new SampleCollectionSetCandidatesTask());
  }
  add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
  if (evac_failed) {
    add_parallel_task(new RestoreEvacFailureRegionsTask(evac_failure_regions));
  }
}

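// Closure that frees all regions of humongous objects that are still eligible
// for eager reclamation at this point, keeping count of the reclaimed objects,
// regions and bytes.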
class G1FreeHumongousRegionClosure : public G1HeapRegionIndexClosure {
  uint _humongous_objects_reclaimed;
  uint _humongous_regions_reclaimed;
  size_t _freed_bytes;
  G1CollectedHeap* _g1h;

  // Returns whether the humongous object starting at the given region index
  // is reclaimable.
  //
  // At this point in the garbage collection, checking whether the humongous object
  // is still a candidate is sufficient because:
  //
  // - if it has not been a candidate at the start of collection, it will never
  //   become a candidate during the gc (and stays live).
  // - any outstanding references found (i.e. in its remembered set, or from the
  //   collection set) will have set the candidate state to false.
  // - there can be no references from within humongous start regions referencing
  //   the object because we never allocate other objects into them.
  //   (I.e. there can be no intra-region references within humongous objects.)
  //
  // It is not required to check whether the object has been found dead by marking
  // or not; in fact that would prevent reclamation within a concurrent cycle, as
  // all objects allocated during that time are considered live.
  // SATB marking is even more conservative than the remembered set.
  // So if at this point in the collection we did not find a reference during gc
  // (or it had enough references to not be a candidate, having many remembered
  // set entries), nobody has a reference to it.
  //
  // Since remembered sets are only ever updated by concurrent refinement threads
  // at mutator time, the remembered sets do not need to be checked again.
  //
  // Other implementation considerations:
  // - never consider non-typeArrays during marking as there is a considerable cost
  //   for maintaining the SATB invariant.
  bool is_reclaimable(uint region_idx) const {
    return G1CollectedHeap::heap()->is_humongous_reclaim_candidate(region_idx);
  }

public:
  G1FreeHumongousRegionClosure() :
    _humongous_objects_reclaimed(0),
    _humongous_regions_reclaimed(0),
    _freed_bytes(0),
    _g1h(G1CollectedHeap::heap())
  {}

  bool do_heap_region_index(uint region_index) override {
    if (!is_reclaimable(region_index)) {
      return false;
    }

    G1HeapRegion* r = _g1h->region_at(region_index);

    oop obj = cast_to_oop(r->bottom());
    {
      ResourceMark rm;
      bool allocated_after_mark_start = r->bottom() == _g1h->concurrent_mark()->top_at_mark_start(r);
      bool mark_in_progress = _g1h->collector_state()->is_in_marking();
      guarantee(_g1h->can_be_marked_through_immediately(obj) || (allocated_after_mark_start || !mark_in_progress),
                "Only eagerly reclaiming arrays without oops is always supported, other humongous objects only if allocated after mark start, but the object "
                PTR_FORMAT " (%s) is not (allocated after mark: %d mark in progress %d marked immediately %d is_array %d array_with_oops %d).",
                p2i(r->bottom()), obj->klass()->name()->as_C_string(), allocated_after_mark_start, mark_in_progress, _g1h->can_be_marked_through_immediately(obj), obj->is_array(), obj->is_array_with_oops());
    }
    log_debug(gc, humongous)("Reclaimed humongous region %u (object size %zu @ " PTR_FORMAT ")",
                             region_index,
                             obj->size() * HeapWordSize,
                             p2i(r->bottom()));

    G1ConcurrentMark* const cm = _g1h->concurrent_mark();
    cm->humongous_object_eagerly_reclaimed(r);
    assert(!cm->is_marked_in_bitmap(obj),
           "Eagerly reclaimed humongous region %u should not be marked at all but is in bitmap %s",
           region_index,
           BOOL_TO_STR(cm->is_marked_in_bitmap(obj)));
    _humongous_objects_reclaimed++;

    auto free_humongous_region = [&] (G1HeapRegion* r) {
      _freed_bytes += r->used();
      r->set_containing_set(nullptr);
      _humongous_regions_reclaimed++;
      G1HeapRegionPrinter::eager_reclaim(r);
      // Humongous non-typeArrays may have dirty card tables that need to be
      // cleared; do it for all types just in case.
      r->clear_both_card_tables();
      _g1h->free_humongous_region(r, nullptr);
    };

    _g1h->humongous_obj_regions_iterate(r, free_humongous_region);

    return false;
  }

  uint humongous_objects_reclaimed() const {
    return _humongous_objects_reclaimed;
  }

  uint humongous_regions_reclaimed() const {
    return _humongous_regions_reclaimed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }
};

#if COMPILER2_OR_JVMCI
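// Derived pointers in compiled frames are recorded during the pause relative
// to their base oops; after evacuation the bases may have moved, so the table
// entries must be re-adjusted before mutators resume.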
class G1PostEvacuateCollectionSetCleanupTask2::UpdateDerivedPointersTask : public G1AbstractSubTask {
public:
  UpdateDerivedPointersTask() : G1AbstractSubTask(G1GCPhaseTimes::UpdateDerivedPointers) { }

  double worker_cost() const override { return 1.0; }
  void do_work(uint worker_id) override { DerivedPointerTable::update_pointers(); }
};
#endif

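// Serial subtask driving G1FreeHumongousRegionClosure over the heap. The
// summary updates to the old gen sets and heap used bytes happen in the
// destructor, i.e. after the task has completed.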
class G1PostEvacuateCollectionSetCleanupTask2::EagerlyReclaimHumongousObjectsTask : public G1AbstractSubTask {
  uint _humongous_regions_reclaimed;
  size_t _bytes_freed;

public:
  EagerlyReclaimHumongousObjectsTask() :
    G1AbstractSubTask(G1GCPhaseTimes::EagerlyReclaimHumongousObjects),
    _humongous_regions_reclaimed(0),
    _bytes_freed(0) { }

  virtual ~EagerlyReclaimHumongousObjectsTask() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
    g1h->decrement_summary_bytes(_bytes_freed);
  }

  double worker_cost() const override { return 1.0; }

  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1FreeHumongousRegionClosure cl;
    g1h->heap_region_iterate(&cl);

    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());

    _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
    _bytes_freed = cl.bytes_freed();
  }
};

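// Adjusts the marking data (mark bitmap, TAMS, live bytes) of regions that
// failed evacuation: either clears it, or updates it for regions that will be
// marked through by the marking cycle that has just started.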
class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTask : public G1AbstractSubTask {
  G1EvacFailureRegions* _evac_failure_regions;
  G1HeapRegionClaimer _claimer;

  class ProcessEvacuationFailedRegionsClosure : public G1HeapRegionClosure {
  public:
    bool do_heap_region(G1HeapRegion* r) override {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      G1ConcurrentMark* cm = g1h->concurrent_mark();

      // Concurrent mark does not mark through regions that we retain (they are root
      // regions with respect to marking), so we must clear their mark data (tams,
      // bitmap, ...) that was set eagerly or during evacuation failure.
      bool clear_mark_data = !g1h->collector_state()->is_in_concurrent_start_gc() ||
                             g1h->policy()->should_retain_evac_failed_region(r);

      if (clear_mark_data) {
        g1h->clear_bitmap_for_region(r);
      } else {
        // This evacuation failed region is going to be marked through. Update mark data.
        cm->update_top_at_mark_start(r);
        cm->set_live_bytes(r->hrm_index(), r->live_bytes());
        assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), cm->top_at_mark_start(r)) != cm->top_at_mark_start(r),
               "Marks must be on bitmap for region %u", r->hrm_index());
      }
      return false;
    }
  };

public:
  ProcessEvacuationFailedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::ProcessEvacuationFailedRegions),
    _evac_failure_regions(evac_failure_regions),
    _claimer(0) {
  }

  void set_max_workers(uint max_workers) override {
    _claimer.set_n_workers(max_workers);
  }

  double worker_cost() const override {
    return _evac_failure_regions->num_regions_evac_failed();
  }

  void do_work(uint worker_id) override {
    ProcessEvacuationFailedRegionsClosure cl;
    _evac_failure_regions->par_iterate(&cl, &_claimer, worker_id);
  }
};

// Helper class to keep statistics for the collection set freeing.
class FreeCSetStats {
  size_t _before_used_bytes;   // Usage in regions successfully evacuated
  size_t _after_used_bytes;    // Usage in regions failing evacuation
  size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
  size_t _failure_used_words;  // Live size in failed regions
  size_t _failure_waste_words; // Wasted size in failed regions
  uint _regions_freed;         // Number of regions freed

public:
  FreeCSetStats() :
    _before_used_bytes(0),
    _after_used_bytes(0),
    _bytes_allocated_in_old_since_last_gc(0),
    _failure_used_words(0),
    _failure_waste_words(0),
    _regions_freed(0) { }

  void merge_stats(FreeCSetStats* other) {
    assert(other != nullptr, "invariant");
    _before_used_bytes += other->_before_used_bytes;
    _after_used_bytes += other->_after_used_bytes;
    _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
    _failure_used_words += other->_failure_used_words;
    _failure_waste_words += other->_failure_waste_words;
    _regions_freed += other->_regions_freed;
  }

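  // Publish the merged totals: update evacuation info, heap used bytes, old
  // gen PLAB failure statistics and the old gen allocation tracker.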
  void report(G1CollectedHeap* g1h, G1EvacInfo* evacuation_info) {
    evacuation_info->set_regions_freed(_regions_freed);
    evacuation_info->set_collection_set_used_before(_before_used_bytes + _after_used_bytes);
    evacuation_info->increment_collection_set_used_after(_after_used_bytes);

    g1h->decrement_summary_bytes(_before_used_bytes);
    g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);

    G1Policy* policy = g1h->policy();
    policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc);

    policy->cset_regions_freed();
  }

  void account_failed_region(G1HeapRegion* r) {
    size_t used_words = r->live_bytes() / HeapWordSize;
    _failure_used_words += used_words;
    _failure_waste_words += G1HeapRegion::GrainWords - used_words;
    _after_used_bytes += r->used();

    // When moving a young gen region to old gen, we "allocate" that whole
    // region there. This is in addition to any already evacuated objects.
    // Notify the policy about that. Old gen regions do not cause an
    // additional allocation: both the objects still in the region and the
    // ones already moved are accounted for elsewhere.
    if (r->is_young()) {
      _bytes_allocated_in_old_since_last_gc += G1HeapRegion::GrainBytes;
    }
  }

  void account_evacuated_region(G1HeapRegion* r) {
    size_t used = r->used();
    assert(used > 0, "region %u %s zero used", r->hrm_index(), r->get_short_type_str());
    _before_used_bytes += used;
    _regions_freed += 1;
  }
};

// Closure applied to all regions in the collection set.
class FreeCSetClosure : public G1HeapRegionClosure {
  // Helper to send JFR events for regions.
  class JFREventForRegion {
    EventGCPhaseParallel _event;

  public:
    JFREventForRegion(G1HeapRegion* region, uint worker_id) : _event() {
      _event.set_gcId(GCId::current());
      _event.set_gcWorkerId(worker_id);
      if (region->is_young()) {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
      } else {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
      }
    }

    ~JFREventForRegion() {
      _event.commit();
    }
  };

  // Helper to do timing for region work.
  class TimerForRegion {
    Tickspan& _time;
    Ticks _start_time;

  public:
    TimerForRegion(Tickspan& time) : _time(time), _start_time(Ticks::now()) { }
    ~TimerForRegion() {
      _time += Ticks::now() - _start_time;
    }
  };

  // FreeCSetClosure members
  G1CollectedHeap* _g1h;
  const size_t* _surviving_young_words;
  uint _worker_id;
  Tickspan _young_time;
  Tickspan _non_young_time;
  FreeCSetStats* _stats;
  G1EvacFailureRegions* _evac_failure_regions;
  uint _num_retained_regions;

  void assert_tracks_surviving_words(G1HeapRegion* r) {
    assert(r->young_index_in_cset() != 0 &&
           (uint)r->young_index_in_cset() <= _g1h->collection_set()->young_region_length(),
           "Young index %u is wrong for region %u of type %s with %u young regions",
           r->young_index_in_cset(), r->hrm_index(), r->get_type_str(), _g1h->collection_set()->young_region_length());
  }

  void handle_evacuated_region(G1HeapRegion* r) {
    assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
    stats()->account_evacuated_region(r);

    G1HeapRegionPrinter::evac_reclaim(r);
    // Free the region and its remembered set.
    _g1h->free_region(r, nullptr);
  }

  void handle_failed_region(G1HeapRegion* r) {
    // Do some allocation statistics accounting. Regions that failed evacuation
    // are always made old, so there is no need to update anything in the young
    // gen statistics, but we need to update old gen statistics.
    stats()->account_failed_region(r);

    G1GCPhaseTimes* p = _g1h->phase_times();
    assert(r->in_collection_set(), "Failed evacuation of region %u not in collection set", r->hrm_index());

    p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreEvacuationFailedRegions,
                                      _worker_id,
                                      1,
                                      G1GCPhaseTimes::RestoreEvacFailureRegionsEvacFailedNum);

    bool retain_region = _g1h->policy()->should_retain_evac_failed_region(r);
    // Update the region state due to the failed evacuation.
    r->handle_evacuation_failure(retain_region);
    assert(r->is_old(), "must already be relabelled as old");

    if (retain_region) {
      _g1h->retain_region(r);
      _num_retained_regions++;
    }
    assert(retain_region == r->rem_set()->is_tracked(), "When retaining a region, remembered set should be kept.");

    // Add the region to the old set; we need to hold the lock for that.
    MutexLocker x(G1OldSets_lock, Mutex::_no_safepoint_check_flag);
    _g1h->old_set_add(r);
  }

  Tickspan& timer_for_region(G1HeapRegion* r) {
    return r->is_young() ? _young_time : _non_young_time;
  }

  FreeCSetStats* stats() {
    return _stats;
  }

public:
  FreeCSetClosure(const size_t* surviving_young_words,
                  uint worker_id,
                  FreeCSetStats* stats,
                  G1EvacFailureRegions* evac_failure_regions) :
    G1HeapRegionClosure(),
    _g1h(G1CollectedHeap::heap()),
    _surviving_young_words(surviving_young_words),
    _worker_id(worker_id),
    _young_time(),
    _non_young_time(),
    _stats(stats),
    _evac_failure_regions(evac_failure_regions),
    _num_retained_regions(0) { }

  virtual bool do_heap_region(G1HeapRegion* r) {
    assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
    JFREventForRegion event(r, _worker_id);
    TimerForRegion timer(timer_for_region(r));

    if (r->is_young()) {
      assert_tracks_surviving_words(r);
      r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
    }

    if (_evac_failure_regions->contains(r->hrm_index())) {
      handle_failed_region(r);
    } else {
      handle_evacuated_region(r);
    }
    assert(!_g1h->is_on_master_free_list(r), "sanity");

    return false;
  }

  void report_timing() {
    G1GCPhaseTimes* pt = _g1h->phase_times();
    if (_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, _worker_id, _young_time.seconds());
    }
    if (_non_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
    }
  }

  uint num_retained_regions() const { return _num_retained_regions; }
};

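// Parallel subtask that frees the regions of the collection set, using one
// FreeCSetStats per worker; the per-worker statistics are merged and reported
// in the destructor.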
class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1AbstractSubTask {
  G1CollectedHeap* _g1h;
  G1EvacInfo* _evacuation_info;
  FreeCSetStats* _worker_stats;
  G1HeapRegionClaimer _claimer;
  const size_t* _surviving_young_words;
  uint _active_workers;
  G1EvacFailureRegions* _evac_failure_regions;
  Atomic<uint> _num_retained_regions;

  FreeCSetStats* worker_stats(uint worker) {
    return &_worker_stats[worker];
  }

  void report_statistics() {
    // Merge the accounting
    FreeCSetStats total_stats;
    for (uint worker = 0; worker < _active_workers; worker++) {
      total_stats.merge_stats(worker_stats(worker));
    }
    total_stats.report(_g1h, _evacuation_info);
  }

public:
  FreeCollectionSetTask(G1EvacInfo* evacuation_info,
                        const size_t* surviving_young_words,
                        G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::FreeCollectionSet),
    _g1h(G1CollectedHeap::heap()),
    _evacuation_info(evacuation_info),
    _worker_stats(nullptr),
    _claimer(0),
    _surviving_young_words(surviving_young_words),
    _active_workers(0),
    _evac_failure_regions(evac_failure_regions),
    _num_retained_regions(0) {

    _g1h->clear_eden();
  }

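  // Serial epilogue: re-sort the collection set candidates if new regions were
  // retained, report the merged statistics, release the per-worker stats and
  // clear the collection set, recording the elapsed time as serial free cset
  // time.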
  virtual ~FreeCollectionSetTask() {
    Ticks serial_time = Ticks::now();

    bool has_new_retained_regions = _num_retained_regions.load_relaxed() != 0;
    if (has_new_retained_regions) {
      G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
      candidates->sort_by_efficiency();
    }

    report_statistics();
    // Destruct the per-worker stats explicitly; they were constructed with
    // placement new into raw C heap memory in set_max_workers().
    for (uint worker = 0; worker < _active_workers; worker++) {
      _worker_stats[worker].~FreeCSetStats();
    }
    FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);

    _g1h->clear_collection_set();

    G1GCPhaseTimes* p = _g1h->phase_times();
    p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
  }

  double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->initial_region_length(); }

  void set_max_workers(uint max_workers) override {
    _active_workers = max_workers;
    _worker_stats = NEW_C_HEAP_ARRAY(FreeCSetStats, max_workers, mtGC);
    // Construct each element individually: array placement new may require
    // extra bookkeeping space, so it must not be used on raw C heap memory.
    for (uint worker = 0; worker < _active_workers; worker++) {
      ::new (&_worker_stats[worker]) FreeCSetStats();
    }
    _claimer.set_n_workers(_active_workers);
  }

  void do_work(uint worker_id) override {
    FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id), _evac_failure_regions);
    _g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
    // Report per-region type timings.
    cl.report_timing();

    _num_retained_regions.add_then_fetch(cl.num_retained_regions(), memory_order_relaxed);
  }
};

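// Resizes all Java thread TLABs and switches every thread's card table base
// pointer to the card table swapped in globally when this task was created.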
class G1PostEvacuateCollectionSetCleanupTask2::ResizeTLABsAndSwapCardTableTask : public G1AbstractSubTask {
  G1JavaThreadsListClaimer _claimer;

  // There is not much work per thread, so the number of threads per worker is high.
  static const uint ThreadsPerWorker = 250;

public:
  ResizeTLABsAndSwapCardTableTask()
    : G1AbstractSubTask(G1GCPhaseTimes::ResizeThreadLABs), _claimer(ThreadsPerWorker)
  {
    G1BarrierSet::g1_barrier_set()->swap_global_card_table();
  }

  void do_work(uint worker_id) override {
    class ResizeAndSwapCardTableClosure : public ThreadClosure {
    public:
      void do_thread(Thread* thread) {
        if (UseTLAB && ResizeTLAB) {
          thread->tlab().resize();
        }

        G1BarrierSet::g1_barrier_set()->update_card_table_base(thread);
      }
    } resize_and_swap_cl;

    _claimer.apply(&resize_and_swap_cl);
  }

  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::ResetPartialArrayStateManagerTask
  : public G1AbstractSubTask
{
public:
  ResetPartialArrayStateManagerTask()
    : G1AbstractSubTask(G1GCPhaseTimes::ResetPartialArrayStateManager)
  {}

  double worker_cost() const override {
    return AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    // This must be in phase2 cleanup, after phase1 has destroyed all of the
    // associated allocators.
    G1CollectedHeap::heap()->partial_array_state_manager()->reset();
  }
};

G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacInfo* evacuation_info,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }
  add_serial_task(new ResetPartialArrayStateManagerTask());

  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }

  add_parallel_task(new ResizeTLABsAndSwapCardTableTask());
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}