/*
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
  G1ParScanThreadStateSet* _per_thread_states;

public:
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }

  double worker_cost() const override { return 1.0; }

  void do_work(uint worker_id) override { _per_thread_states->flush_stats(); }
};

class G1PostEvacuateCollectionSetCleanupTask1::RecalculateUsedTask : public G1AbstractSubTask {
  bool _evacuation_failed;
  bool _allocation_failed;

public:
  RecalculateUsedTask(bool evacuation_failed, bool allocation_failed) :
    G1AbstractSubTask(G1GCPhaseTimes::RecalculateUsed),
    _evacuation_failed(evacuation_failed),
    _allocation_failed(allocation_failed) { }

  double worker_cost() const override {
    // If there is no evacuation failure, the work to perform is minimal.
    return _evacuation_failed ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap::heap()->update_used_after_gc(_evacuation_failed);
    if (_allocation_failed) {
      // Reset the G1GCAllocationFailureALot counters and flags
      G1CollectedHeap::heap()->allocation_failure_injector()->reset();
    }
  }
};

class G1PostEvacuateCollectionSetCleanupTask1::SampleCollectionSetCandidatesTask : public G1AbstractSubTask {
public:
  SampleCollectionSetCandidatesTask() : G1AbstractSubTask(G1GCPhaseTimes::SampleCollectionSetCandidates) { }

  static bool should_execute() {
    return G1CollectedHeap::heap()->should_sample_collection_set_candidates();
  }

  double worker_cost() const override {
    return should_execute() ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

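    // Sum the card set memory statistics over all collection set candidate regions.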
    G1MonotonicArenaMemoryStats _total;
    G1CollectionSetCandidates* candidates = g1h->collection_set()->candidates();
    for (G1HeapRegion* r : *candidates) {
      _total.add(r->rem_set()->card_set_memory_stats());
    }
    g1h->set_collection_set_candidates_stats(_total);
  }
};

class G1PostEvacuateCollectionSetCleanupTask1::RestoreEvacFailureRegionsTask : public G1AbstractSubTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  class PhaseTimesStat {
    static constexpr G1GCPhaseTimes::GCParPhases phase_name =
      G1GCPhaseTimes::RemoveSelfForwards;

    G1GCPhaseTimes* _phase_times;
    uint _worker_id;
    Ticks _start;

  public:
    PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
      _phase_times(phase_times),
      _worker_id(worker_id),
      _start(Ticks::now()) { }

    ~PhaseTimesStat() {
      _phase_times->record_or_add_time_secs(phase_name,
                                            _worker_id,
                                            (Ticks::now() - _start).seconds());
    }

    void register_empty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
    }

    void register_nonempty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardChunksNum);
    }

    void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   num_marked_obj,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

      size_t marked_bytes = marked_words * HeapWordSize;
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   marked_bytes,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
    }
  };

  // Fill the memory area from start to end with filler objects, and update the BOT
  // accordingly. Since we clear and use the bitmap for marking objects that failed
  // evacuation, there is no other work to be done there.
  static size_t zap_dead_objects(G1HeapRegion* hr, HeapWord* start, HeapWord* end) {
    assert(start <= end, "precondition");
    if (start == end) {
      return 0;
    }

    hr->fill_range_with_dead_objects(start, end);
    return pointer_delta(end, start);
  }

  static void update_garbage_words_in_hr(G1HeapRegion* hr, size_t garbage_words) {
    if (garbage_words != 0) {
      hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
    }
  }

  static void prefetch_obj(HeapWord* obj_addr) {
    Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
  }

  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

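  // Chunks are numbered consecutively across all evacuation-failed regions, so
  // chunk_idx / _num_chunks_per_region selects the failed region and
  // chunk_idx % _num_chunks_per_region selects the chunk within that region.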
  void process_chunk(uint worker_id, uint chunk_idx) {
    PhaseTimesStat stat(_g1h->phase_times(), worker_id);

    G1CMBitMap* bitmap = _cm->mark_bitmap();
    const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
    G1HeapRegion* hr = _g1h->region_at(region_idx);

    HeapWord* hr_bottom = hr->bottom();
    HeapWord* hr_top = hr->top();
    HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

    assert(chunk_start < hr->end(), "inv");
    if (chunk_start >= hr_top) {
      return;
    }

    HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
    HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

    size_t garbage_words = 0;

    if (chunk_start == hr_bottom) {
      // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
      garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
    }

    if (first_marked_addr >= chunk_end) {
      stat.register_empty_chunk();
      update_garbage_words_in_hr(hr, garbage_words);
      return;
    }

    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
        obj->unset_self_forwarded();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);
  }

public:
  RestoreEvacFailureRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::RestoreEvacuationFailedRegions),
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _evac_failure_regions(evac_failure_regions),
    _chunk_bitmap(mtGC) {

    _num_evac_fail_regions = _evac_failure_regions->num_regions_evac_failed();
    _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region();

    _chunk_size = static_cast<uint>(G1HeapRegion::GrainWords / _num_chunks_per_region);

    log_debug(gc, ergo)("Initializing removing self forwards with %u chunks per region",
                        _num_chunks_per_region);

    _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
  }

  double worker_cost() const override {
    assert(_evac_failure_regions->has_regions_evac_failed(), "Should not call this if there were no evacuation failures");

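    // Estimate one worker per G1RestoreRetainedRegionChunksPerWorker chunks in each
    // evacuation-failed region.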
    double workers_per_region = (double)G1CollectedHeap::get_chunks_per_region() / G1RestoreRetainedRegionChunksPerWorker;
    return workers_per_region * _evac_failure_regions->num_regions_evac_failed();
  }

  void do_work(uint worker_id) override {
    const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
    const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
    const uint start_chunk_idx = worker_id * total_chunks / total_workers;

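    // Each worker starts claiming at a different offset but still visits every
    // chunk, so no chunk is left unprocessed.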
    for (uint i = 0; i < total_chunks; i++) {
      const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
      if (claim_chunk(chunk_idx)) {
        process_chunk(worker_id, chunk_idx);
      }
    }
  }
};

G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 1", G1CollectedHeap::heap()->phase_times())
{
  bool evac_failed = evac_failure_regions->has_regions_evac_failed();
  bool alloc_failed = evac_failure_regions->has_regions_alloc_failed();

  add_serial_task(new MergePssTask(per_thread_states));
  add_serial_task(new RecalculateUsedTask(evac_failed, alloc_failed));
  if (SampleCollectionSetCandidatesTask::should_execute()) {
    add_serial_task(new SampleCollectionSetCandidatesTask());
  }
  add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
  if (evac_failed) {
    add_parallel_task(new RestoreEvacFailureRegionsTask(evac_failure_regions));
  }
}

class G1FreeHumongousRegionClosure : public HeapRegionIndexClosure {
  uint _humongous_objects_reclaimed;
  uint _humongous_regions_reclaimed;
  size_t _freed_bytes;
  G1CollectedHeap* _g1h;

  // Returns whether the given humongous object defined by the start region index
  // is reclaimable.
  //
  // At this point in the garbage collection, checking whether the humongous object
  // is still a candidate is sufficient because:
  //
  // - if it has not been a candidate at the start of collection, it will never
  // become a candidate (and be live) during the gc.
  // - any found outstanding (i.e. in the DCQ, or in its remembered set)
  // references will set the candidate state to false.
  // - there can be no references from within humongous starts regions referencing
  // the object because we never allocate other objects into them.
  // (I.e. there can be no intra-region references)
  //
  // It is not required to check whether the object has been found dead by marking
  // or not; in fact doing so would prevent reclamation within a concurrent cycle, as
  // all objects allocated during that time are considered live.
  // SATB marking is even more conservative than the remembered set.
  // So if at this point in the collection we did not find a reference during gc
  // (or it had enough references to not be a candidate, having many remembered
  // set entries), nobody has a reference to it.
  // At the start of collection we flush all refinement logs, and remembered sets
  // are completely up-to-date wrt references to the humongous object.
  //
  // So there is no need to re-check remembered set size of the humongous region.
  //
  // Other implementation considerations:
  // - never consider object arrays at this time because they would pose
  // considerable effort for cleaning up the remembered sets. This is
  // required because stale remembered sets might reference locations that
  // are currently allocated into.
  bool is_reclaimable(uint region_idx) const {
    return G1CollectedHeap::heap()->is_humongous_reclaim_candidate(region_idx);
  }

public:
  G1FreeHumongousRegionClosure() :
    _humongous_objects_reclaimed(0),
    _humongous_regions_reclaimed(0),
    _freed_bytes(0),
    _g1h(G1CollectedHeap::heap())
  {}

  bool do_heap_region_index(uint region_index) override {
    if (!is_reclaimable(region_index)) {
      return false;
    }

    G1HeapRegion* r = _g1h->region_at(region_index);

    oop obj = cast_to_oop(r->bottom());
    guarantee(obj->is_typeArray(),
              "Only eagerly reclaiming type arrays is supported, but the object "
              PTR_FORMAT " is not.", p2i(r->bottom()));

    log_debug(gc, humongous)("Reclaimed humongous region %u (object size " SIZE_FORMAT " @ " PTR_FORMAT ")",
                             region_index,
                             obj->size() * HeapWordSize,
                             p2i(r->bottom())
                            );

    G1ConcurrentMark* const cm = _g1h->concurrent_mark();
    cm->humongous_object_eagerly_reclaimed(r);
    assert(!cm->is_marked_in_bitmap(obj),
           "Eagerly reclaimed humongous region %u should not be marked at all but is in bitmap %s",
           region_index,
           BOOL_TO_STR(cm->is_marked_in_bitmap(obj)));
    _humongous_objects_reclaimed++;

    auto free_humongous_region = [&] (G1HeapRegion* r) {
      _freed_bytes += r->used();
      r->set_containing_set(nullptr);
      _humongous_regions_reclaimed++;
      G1HeapRegionPrinter::eager_reclaim(r);
      _g1h->free_humongous_region(r, nullptr);
    };

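    // Apply the closure above to the humongous starts region and all of its
    // continues regions.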
    _g1h->humongous_obj_regions_iterate(r, free_humongous_region);

    return false;
  }

  uint humongous_objects_reclaimed() {
    return _humongous_objects_reclaimed;
  }

  uint humongous_regions_reclaimed() {
    return _humongous_regions_reclaimed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }
};

#if COMPILER2_OR_JVMCI
class G1PostEvacuateCollectionSetCleanupTask2::UpdateDerivedPointersTask : public G1AbstractSubTask {
public:
  UpdateDerivedPointersTask() : G1AbstractSubTask(G1GCPhaseTimes::UpdateDerivedPointers) { }

  double worker_cost() const override { return 1.0; }
  void do_work(uint worker_id) override { DerivedPointerTable::update_pointers(); }
};
#endif

class G1PostEvacuateCollectionSetCleanupTask2::EagerlyReclaimHumongousObjectsTask : public G1AbstractSubTask {
  uint _humongous_regions_reclaimed;
  size_t _bytes_freed;

public:
  EagerlyReclaimHumongousObjectsTask() :
    G1AbstractSubTask(G1GCPhaseTimes::EagerlyReclaimHumongousObjects),
    _humongous_regions_reclaimed(0),
    _bytes_freed(0) { }

  virtual ~EagerlyReclaimHumongousObjectsTask() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
    g1h->decrement_summary_bytes(_bytes_freed);
  }

  double worker_cost() const override { return 1.0; }
  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1FreeHumongousRegionClosure cl;
    g1h->heap_region_iterate(&cl);

    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());

    _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
    _bytes_freed = cl.bytes_freed();
  }
};

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;
  G1EvacFailureRegions* _evac_failure_regions;

  G1HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(G1HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }

public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
    G1CardTableEntryClosure(),
    _num_dirtied(0),
    _g1h(g1h),
    _g1_ct(g1h->card_table()),
    _evac_failure_regions(evac_failure_regions) { }

  void do_card_ptr(CardValue* card_ptr, uint worker_id) {
    G1HeapRegion* hr = region_for_card(card_ptr);

    // Should only dirty cards in regions that won't be freed.
    if (!will_become_free(hr)) {
      *card_ptr = G1CardTable::dirty_card_val();
      _num_dirtied++;
    }
  }

  size_t num_dirtied() const { return _num_dirtied; }
};

class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTask : public G1AbstractSubTask {
  G1EvacFailureRegions* _evac_failure_regions;
  HeapRegionClaimer _claimer;

  class ProcessEvacuationFailedRegionsClosure : public HeapRegionClosure {
  public:

    bool do_heap_region(G1HeapRegion* r) override {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      G1ConcurrentMark* cm = g1h->concurrent_mark();

      HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
      assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
      assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());

      // Concurrent mark does not mark through regions that we retain (they are root
      // regions wrt marking), so we must clear their mark data (tams, bitmap, ...)
      // that was set eagerly or during evacuation failure.
      bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() ||
                             g1h->policy()->should_retain_evac_failed_region(r);

      if (clear_mark_data) {
        g1h->clear_bitmap_for_region(r);
      } else {
        // This evacuation failed region is going to be marked through. Update mark data.
        cm->update_top_at_mark_start(r);
        cm->set_live_bytes(r->hrm_index(), r->live_bytes());
        assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), cm->top_at_mark_start(r)) != cm->top_at_mark_start(r),
               "Marks must be on bitmap for region %u", r->hrm_index());
      }
      return false;
    }
  };

public:
  ProcessEvacuationFailedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::ProcessEvacuationFailedRegions),
    _evac_failure_regions(evac_failure_regions),
    _claimer(0) {
  }

  void set_max_workers(uint max_workers) override {
    _claimer.set_n_workers(max_workers);
  }

  double worker_cost() const override {
    return _evac_failure_regions->num_regions_evac_failed();
  }

  void do_work(uint worker_id) override {
    ProcessEvacuationFailedRegionsClosure cl;
    _evac_failure_regions->par_iterate(&cl, &_claimer, worker_id);
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask : public G1AbstractSubTask {
  BufferNodeList* _rdc_buffers;
  uint _num_buffer_lists;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  RedirtyLoggedCardsTask(G1EvacFailureRegions* evac_failure_regions, BufferNodeList* rdc_buffers, uint num_buffer_lists) :
    G1AbstractSubTask(G1GCPhaseTimes::RedirtyCards),
    _rdc_buffers(rdc_buffers),
    _num_buffer_lists(num_buffer_lists),
    _evac_failure_regions(evac_failure_regions) { }

  double worker_cost() const override {
    // Needs more investigation.
    return G1CollectedHeap::heap()->workers()->active_workers();
  }

  void do_work(uint worker_id) override {
    RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap(), _evac_failure_regions);

    uint start = worker_id;
    for (uint i = 0; i < _num_buffer_lists; i++) {
      uint index = (start + i) % _num_buffer_lists;

      BufferNode* next = Atomic::load(&_rdc_buffers[index]._head);
      BufferNode* tail = Atomic::load(&_rdc_buffers[index]._tail);

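      // Pop nodes off the list head with cmpxchg so that multiple workers can help
      // drain the same list; popping the tail node sets the head to null.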
      while (next != nullptr) {
        BufferNode* node = next;
        next = Atomic::cmpxchg(&_rdc_buffers[index]._head, node, (node != tail) ? node->next() : nullptr);
        if (next == node) {
          cl.apply_to_buffer(node, worker_id);
          next = (node != tail) ? node->next() : nullptr;
        } else {
          break; // If there is contention, move to the next BufferNodeList
        }
      }
    }
    record_work_item(worker_id, 0, cl.num_dirtied());
  }
};

// Helper class to keep statistics for the collection set freeing
class FreeCSetStats {
  size_t _before_used_bytes;   // Usage in regions successfully evacuated
  size_t _after_used_bytes;    // Usage in regions failing evacuation
  size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
  size_t _failure_used_words;  // Live size in failed regions
  size_t _failure_waste_words; // Wasted size in failed regions
  size_t _card_rs_length;      // (Card Set) Remembered set size
  uint _regions_freed;         // Number of regions freed

public:
  FreeCSetStats() :
      _before_used_bytes(0),
      _after_used_bytes(0),
      _bytes_allocated_in_old_since_last_gc(0),
      _failure_used_words(0),
      _failure_waste_words(0),
      _card_rs_length(0),
      _regions_freed(0) { }

  void merge_stats(FreeCSetStats* other) {
    assert(other != nullptr, "invariant");
    _before_used_bytes += other->_before_used_bytes;
    _after_used_bytes += other->_after_used_bytes;
    _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
    _failure_used_words += other->_failure_used_words;
    _failure_waste_words += other->_failure_waste_words;
    _card_rs_length += other->_card_rs_length;
    _regions_freed += other->_regions_freed;
  }

  void report(G1CollectedHeap* g1h, G1EvacInfo* evacuation_info) {
    evacuation_info->set_regions_freed(_regions_freed);
    evacuation_info->set_collection_set_used_before(_before_used_bytes + _after_used_bytes);
    evacuation_info->increment_collection_set_used_after(_after_used_bytes);

    g1h->decrement_summary_bytes(_before_used_bytes);
    g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);

    G1Policy *policy = g1h->policy();
    policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc);
    policy->record_card_rs_length(_card_rs_length);
    policy->cset_regions_freed();
  }

  void account_failed_region(G1HeapRegion* r) {
    size_t used_words = r->live_bytes() / HeapWordSize;
    _failure_used_words += used_words;
    _failure_waste_words += G1HeapRegion::GrainWords - used_words;
    _after_used_bytes += r->used();

    // When moving a young gen region to old gen, we "allocate" that whole
    // region there. This is in addition to any already evacuated objects.
    // Notify the policy about that. Old gen regions do not cause an
    // additional allocation: both the objects still in the region and the
    // ones already moved are accounted for elsewhere.
    if (r->is_young()) {
      _bytes_allocated_in_old_since_last_gc += G1HeapRegion::GrainBytes;
    }
  }

  void account_evacuated_region(G1HeapRegion* r) {
    size_t used = r->used();
    assert(used > 0, "region %u %s zero used", r->hrm_index(), r->get_short_type_str());
    _before_used_bytes += used;
    _regions_freed += 1;
  }

  void account_card_rs_length(G1HeapRegion* r) {
    _card_rs_length += r->rem_set()->occupied();
  }
};

// Closure applied to all regions in the collection set.
class FreeCSetClosure : public HeapRegionClosure {
  // Helper to send JFR events for regions.
  class JFREventForRegion {
    EventGCPhaseParallel _event;

  public:
    JFREventForRegion(G1HeapRegion* region, uint worker_id) : _event() {
      _event.set_gcId(GCId::current());
      _event.set_gcWorkerId(worker_id);
      if (region->is_young()) {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
      } else {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
      }
    }

    ~JFREventForRegion() {
      _event.commit();
    }
  };

  // Helper to do timing for region work.
  class TimerForRegion {
    Tickspan& _time;
    Ticks     _start_time;
  public:
    TimerForRegion(Tickspan& time) : _time(time), _start_time(Ticks::now()) { }
    ~TimerForRegion() {
      _time += Ticks::now() - _start_time;
    }
  };

  // FreeCSetClosure members
  G1CollectedHeap* _g1h;
  const size_t*    _surviving_young_words;
  uint             _worker_id;
  Tickspan         _young_time;
  Tickspan         _non_young_time;
  FreeCSetStats*   _stats;
  G1EvacFailureRegions* _evac_failure_regions;
  uint             _num_retained_regions;

  void assert_tracks_surviving_words(G1HeapRegion* r) {
    assert(r->young_index_in_cset() != 0 &&
           (uint)r->young_index_in_cset() <= _g1h->collection_set()->young_region_length(),
           "Young index %u is wrong for region %u of type %s with %u young regions",
           r->young_index_in_cset(), r->hrm_index(), r->get_type_str(), _g1h->collection_set()->young_region_length());
  }

  void handle_evacuated_region(G1HeapRegion* r) {
    assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
    stats()->account_evacuated_region(r);

    G1HeapRegionPrinter::evac_reclaim(r);
    // Free the region and its remembered set.
    _g1h->free_region(r, nullptr);
  }

  void handle_failed_region(G1HeapRegion* r) {
    // Do some allocation statistics accounting. Regions that failed evacuation
    // are always made old, so there is no need to update anything in the young
    // gen statistics, but we need to update old gen statistics.
    stats()->account_failed_region(r);

    G1GCPhaseTimes* p = _g1h->phase_times();
    assert(r->in_collection_set(), "Failed evacuation of region %u not in collection set", r->hrm_index());

    p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreEvacuationFailedRegions,
                                      _worker_id,
                                      1,
                                      G1GCPhaseTimes::RestoreEvacFailureRegionsEvacFailedNum);

    bool retain_region = _g1h->policy()->should_retain_evac_failed_region(r);
    // Update the region state due to the failed evacuation.
    r->handle_evacuation_failure(retain_region);
    assert(r->is_old(), "must already be relabelled as old");

    if (retain_region) {
      _g1h->retain_region(r);
      _num_retained_regions++;
    }
    assert(retain_region == r->rem_set()->is_tracked(), "When retaining a region, remembered set should be kept.");

    // Add region to old set, need to hold lock.
    MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _g1h->old_set_add(r);
  }

  Tickspan& timer_for_region(G1HeapRegion* r) {
    return r->is_young() ? _young_time : _non_young_time;
  }

  FreeCSetStats* stats() {
    return _stats;
  }

public:
  FreeCSetClosure(const size_t* surviving_young_words,
                  uint worker_id,
                  FreeCSetStats* stats,
                  G1EvacFailureRegions* evac_failure_regions) :
      HeapRegionClosure(),
      _g1h(G1CollectedHeap::heap()),
      _surviving_young_words(surviving_young_words),
      _worker_id(worker_id),
      _young_time(),
      _non_young_time(),
      _stats(stats),
      _evac_failure_regions(evac_failure_regions),
      _num_retained_regions(0) { }

  virtual bool do_heap_region(G1HeapRegion* r) {
    assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
    JFREventForRegion event(r, _worker_id);
    TimerForRegion timer(timer_for_region(r));

    stats()->account_card_rs_length(r);

    if (r->is_young()) {
      assert_tracks_surviving_words(r);
      r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
    }

    if (_evac_failure_regions->contains(r->hrm_index())) {
      handle_failed_region(r);
    } else {
      handle_evacuated_region(r);
    }
    assert(!_g1h->is_on_master_free_list(r), "sanity");

    return false;
  }

  void report_timing() {
    G1GCPhaseTimes* pt = _g1h->phase_times();
    if (_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, _worker_id, _young_time.seconds());
    }
    if (_non_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
    }
  }

  uint num_retained_regions() const { return _num_retained_regions; }
};

class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1AbstractSubTask {
  G1CollectedHeap*  _g1h;
  G1EvacInfo*       _evacuation_info;
  FreeCSetStats*    _worker_stats;
  HeapRegionClaimer _claimer;
  const size_t*     _surviving_young_words;
  uint              _active_workers;
  G1EvacFailureRegions* _evac_failure_regions;
  volatile uint     _num_retained_regions;

  FreeCSetStats* worker_stats(uint worker) {
    return &_worker_stats[worker];
  }

  void report_statistics() {
    // Merge the accounting
    FreeCSetStats total_stats;
    for (uint worker = 0; worker < _active_workers; worker++) {
      total_stats.merge_stats(worker_stats(worker));
    }
    total_stats.report(_g1h, _evacuation_info);
  }

public:
  FreeCollectionSetTask(G1EvacInfo* evacuation_info,
                        const size_t* surviving_young_words,
                        G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::FreeCollectionSet),
    _g1h(G1CollectedHeap::heap()),
    _evacuation_info(evacuation_info),
    _worker_stats(nullptr),
    _claimer(0),
    _surviving_young_words(surviving_young_words),
    _active_workers(0),
    _evac_failure_regions(evac_failure_regions),
    _num_retained_regions(0) {

    _g1h->clear_eden();
  }

  virtual ~FreeCollectionSetTask() {
    Ticks serial_time = Ticks::now();

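    // Newly retained regions were added to the collection set candidates during the
    // pause; re-sort the candidates by efficiency so they are considered in order again.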
    bool has_new_retained_regions = Atomic::load(&_num_retained_regions) != 0;
    if (has_new_retained_regions) {
      G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
      candidates->sort_by_efficiency();
    }

    report_statistics();
    for (uint worker = 0; worker < _active_workers; worker++) {
      _worker_stats[worker].~FreeCSetStats();
    }
    FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);

    G1GCPhaseTimes* p = _g1h->phase_times();
    p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);

    _g1h->clear_collection_set();
  }

  double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->region_length(); }

  void set_max_workers(uint max_workers) override {
    _active_workers = max_workers;
    _worker_stats = NEW_C_HEAP_ARRAY(FreeCSetStats, max_workers, mtGC);
    for (uint worker = 0; worker < _active_workers; worker++) {
      ::new (&_worker_stats[worker]) FreeCSetStats();
    }
    _claimer.set_n_workers(_active_workers);
  }

  void do_work(uint worker_id) override {
    FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id), _evac_failure_regions);
    _g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
    // Report per-region type timings.
    cl.report_timing();

    Atomic::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed);
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::ResizeTLABsTask : public G1AbstractSubTask {
  G1JavaThreadsListClaimer _claimer;

  // There is not much work per thread so the number of threads per worker is high.
  static const uint ThreadsPerWorker = 250;

public:
  ResizeTLABsTask() : G1AbstractSubTask(G1GCPhaseTimes::ResizeThreadLABs), _claimer(ThreadsPerWorker) { }

  void do_work(uint worker_id) override {
    class ResizeClosure : public ThreadClosure {
    public:

      void do_thread(Thread* thread) {
        static_cast<JavaThread*>(thread)->tlab().resize();
      }
    } cl;
    _claimer.apply(&cl);
  }

  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
};

G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacInfo* evacuation_info,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}