/*
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
  G1ParScanThreadStateSet* _per_thread_states;

public:
  MergePssTask(G1ParScanThreadStateSet* per_thread_states) :
    G1AbstractSubTask(G1GCPhaseTimes::MergePSS),
    _per_thread_states(per_thread_states) { }

  double worker_cost() const override { return 1.0; }

  void do_work(uint worker_id) override { _per_thread_states->flush_stats(); }
};

class G1PostEvacuateCollectionSetCleanupTask1::RecalculateUsedTask : public G1AbstractSubTask {
  bool _evacuation_failed;
  bool _allocation_failed;

public:
  RecalculateUsedTask(bool evacuation_failed, bool allocation_failed) :
    G1AbstractSubTask(G1GCPhaseTimes::RecalculateUsed),
    _evacuation_failed(evacuation_failed),
    _allocation_failed(allocation_failed) { }

  double worker_cost() const override {
    // If there is no evacuation failure, the work to perform is minimal.
    return _evacuation_failed ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap::heap()->update_used_after_gc(_evacuation_failed);
    if (_allocation_failed) {
      // Reset the G1GCAllocationFailureALot counters and flags
      G1CollectedHeap::heap()->allocation_failure_injector()->reset();
    }
  }
};

class G1PostEvacuateCollectionSetCleanupTask1::SampleCollectionSetCandidatesTask : public G1AbstractSubTask {
public:
  SampleCollectionSetCandidatesTask() : G1AbstractSubTask(G1GCPhaseTimes::SampleCollectionSetCandidates) { }

  static bool should_execute() {
    return G1CollectedHeap::heap()->should_sample_collection_set_candidates();
  }

  double worker_cost() const override {
    return should_execute() ? 1.0 : AlmostNoWork;
  }

  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1MonotonicArenaMemoryStats _total;
    G1CollectionSetCandidates* candidates = g1h->collection_set()->candidates();
    for (HeapRegion* r : *candidates) {
      _total.add(r->rem_set()->card_set_memory_stats());
    }
    g1h->set_collection_set_candidates_stats(_total);
  }
};

class G1PostEvacuateCollectionSetCleanupTask1::RestoreEvacFailureRegionsTask : public G1AbstractSubTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  class PhaseTimesStat {
    static constexpr G1GCPhaseTimes::GCParPhases phase_name =
      G1GCPhaseTimes::RemoveSelfForwards;

    G1GCPhaseTimes* _phase_times;
    uint _worker_id;
    Ticks _start;

  public:
    PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
      _phase_times(phase_times),
      _worker_id(worker_id),
      _start(Ticks::now()) { }

    ~PhaseTimesStat() {
      _phase_times->record_or_add_time_secs(phase_name,
                                            _worker_id,
                                            (Ticks::now() - _start).seconds());
    }

    void register_empty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
    }

    void register_nonempty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardChunksNum);
    }

    void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   num_marked_obj,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

      size_t marked_bytes = marked_words * HeapWordSize;
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   marked_bytes,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
    }
  };

  // Fill the memory area from start to end with filler objects, and update the BOT
  // accordingly. Since we clear and use the bitmap for marking objects that failed
  // evacuation, there is no other work to be done there.
  static size_t zap_dead_objects(HeapRegion* hr, HeapWord* start, HeapWord* end) {
    assert(start <= end, "precondition");
    if (start == end) {
      return 0;
    }

    hr->fill_range_with_dead_objects(start, end);
    return pointer_delta(end, start);
  }

  static void update_garbage_words_in_hr(HeapRegion* hr, size_t garbage_words) {
    if (garbage_words != 0) {
      hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
    }
  }

  static void prefetch_obj(HeapWord* obj_addr) {
    Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
  }

  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

  void process_chunk(uint worker_id, uint chunk_idx) {
    PhaseTimesStat stat(_g1h->phase_times(), worker_id);

    G1CMBitMap* bitmap = _cm->mark_bitmap();
    const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
    HeapRegion* hr = _g1h->region_at(region_idx);

    HeapWord* hr_bottom = hr->bottom();
    HeapWord* hr_top = hr->top();
    HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

    assert(chunk_start < hr->end(), "inv");
    if (chunk_start >= hr_top) {
      return;
    }

    HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
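    // Objects that failed evacuation are marked in the mark bitmap; any gap between
    // marked objects is dead space that is filled with dummy objects below.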
    HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

    size_t garbage_words = 0;

    if (chunk_start == hr_bottom) {
      // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
      garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
    }

    if (first_marked_addr >= chunk_end) {
      stat.register_empty_chunk();
      update_garbage_words_in_hr(hr, garbage_words);
      return;
    }

    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
        obj->init_mark();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);
  }

public:
  RestoreEvacFailureRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::RestoreEvacuationFailedRegions),
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _evac_failure_regions(evac_failure_regions),
    _chunk_bitmap(mtGC) {

    _num_evac_fail_regions = _evac_failure_regions->num_regions_evac_failed();
    _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region();

    _chunk_size = static_cast<uint>(HeapRegion::GrainWords / _num_chunks_per_region);

    log_debug(gc, ergo)("Initializing removing self forwards with %u chunks per region",
                        _num_chunks_per_region);

    _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
  }

  double worker_cost() const override {
    assert(_evac_failure_regions->has_regions_evac_failed(), "Should not call this if there were no evacuation failures");

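    // Request roughly one worker per G1RestoreRetainedRegionChunksPerWorker chunks,
    // scaled by the number of regions that failed evacuation.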
    double workers_per_region = (double)G1CollectedHeap::get_chunks_per_region() / G1RestoreRetainedRegionChunksPerWorker;
    return workers_per_region * _evac_failure_regions->num_regions_evac_failed();
  }

  void do_work(uint worker_id) override {
    const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
    const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
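    // Start each worker at a different offset so that workers initially claim disjoint
    // chunks and only compete for the remaining ones after wrapping around.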
    const uint start_chunk_idx = worker_id * total_chunks / total_workers;

    for (uint i = 0; i < total_chunks; i++) {
      const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
      if (claim_chunk(chunk_idx)) {
        process_chunk(worker_id, chunk_idx);
      }
    }
  }
};

G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 1", G1CollectedHeap::heap()->phase_times())
{
  bool evac_failed = evac_failure_regions->has_regions_evac_failed();
  bool alloc_failed = evac_failure_regions->has_regions_alloc_failed();

  add_serial_task(new MergePssTask(per_thread_states));
  add_serial_task(new RecalculateUsedTask(evac_failed, alloc_failed));
  if (SampleCollectionSetCandidatesTask::should_execute()) {
    add_serial_task(new SampleCollectionSetCandidatesTask());
  }
  add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
  if (evac_failed) {
    add_parallel_task(new RestoreEvacFailureRegionsTask(evac_failure_regions));
  }
}

class G1FreeHumongousRegionClosure : public HeapRegionIndexClosure {
  uint _humongous_objects_reclaimed;
  uint _humongous_regions_reclaimed;
  size_t _freed_bytes;
  G1CollectedHeap* _g1h;

  // Returns whether the given humongous object defined by the start region index
  // is reclaimable.
  //
  // At this point in the garbage collection, checking whether the humongous object
  // is still a candidate is sufficient because:
  //
  // - if it has not been a candidate at the start of collection, it will never
  // change to be a candidate during the gc (and live).
  // - any found outstanding (i.e. in the DCQ, or in its remembered set)
  // references will set the candidate state to false.
  // - there can be no references from within humongous starts regions referencing
  // the object because we never allocate other objects into them.
  // (I.e. there can be no intra-region references)
  //
  // It is not required to check whether the object has been found dead by marking
  // or not, in fact it would prevent reclamation within a concurrent cycle, as
  // all objects allocated during that time are considered live.
  // SATB marking is even more conservative than the remembered set.
  // So if at this point in the collection we did not find a reference during gc
  // (or it had enough references to not be a candidate, having many remembered
  // set entries), nobody has a reference to it.
  // At the start of collection we flush all refinement logs, and remembered sets
  // are completely up-to-date wrt references to the humongous object.
  //
  // So there is no need to re-check remembered set size of the humongous region.
  //
  // Other implementation considerations:
  // - never consider object arrays at this time because they would pose
  // considerable effort for cleaning up the remembered sets. This is
  // required because stale remembered sets might reference locations that
  // are currently allocated into.
  bool is_reclaimable(uint region_idx) const {
    return G1CollectedHeap::heap()->is_humongous_reclaim_candidate(region_idx);
  }

public:
  G1FreeHumongousRegionClosure() :
    _humongous_objects_reclaimed(0),
    _humongous_regions_reclaimed(0),
    _freed_bytes(0),
    _g1h(G1CollectedHeap::heap())
  {}

  bool do_heap_region_index(uint region_index) override {
    if (!is_reclaimable(region_index)) {
      return false;
    }

    HeapRegion* r = _g1h->region_at(region_index);

    oop obj = cast_to_oop(r->bottom());
    guarantee(obj->is_typeArray(),
              "Only eagerly reclaiming type arrays is supported, but the object "
              PTR_FORMAT " is not.", p2i(r->bottom()));

    log_debug(gc, humongous)("Reclaimed humongous region %u (object size " SIZE_FORMAT " @ " PTR_FORMAT ")",
                             region_index,
                             obj->size() * HeapWordSize,
                             p2i(r->bottom())
                            );

    G1ConcurrentMark* const cm = _g1h->concurrent_mark();
    cm->humongous_object_eagerly_reclaimed(r);
    assert(!cm->is_marked_in_bitmap(obj),
           "Eagerly reclaimed humongous region %u should not be marked at all but is in bitmap %s",
           region_index,
           BOOL_TO_STR(cm->is_marked_in_bitmap(obj)));
    _humongous_objects_reclaimed++;

    auto free_humongous_region = [&] (HeapRegion* r) {
      _freed_bytes += r->used();
      r->set_containing_set(nullptr);
      _humongous_regions_reclaimed++;
      _g1h->free_humongous_region(r, nullptr);
      _g1h->hr_printer()->cleanup(r);
    };

    _g1h->humongous_obj_regions_iterate(r, free_humongous_region);

    return false;
  }

  uint humongous_objects_reclaimed() {
    return _humongous_objects_reclaimed;
  }

  uint humongous_regions_reclaimed() {
    return _humongous_regions_reclaimed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }
};

#if COMPILER2_OR_JVMCI
class G1PostEvacuateCollectionSetCleanupTask2::UpdateDerivedPointersTask : public G1AbstractSubTask {
public:
  UpdateDerivedPointersTask() : G1AbstractSubTask(G1GCPhaseTimes::UpdateDerivedPointers) { }

  double worker_cost() const override { return 1.0; }
  void do_work(uint worker_id) override { DerivedPointerTable::update_pointers(); }
};
#endif

class G1PostEvacuateCollectionSetCleanupTask2::EagerlyReclaimHumongousObjectsTask : public G1AbstractSubTask {
  uint _humongous_regions_reclaimed;
  size_t _bytes_freed;

public:
  EagerlyReclaimHumongousObjectsTask() :
    G1AbstractSubTask(G1GCPhaseTimes::EagerlyReclaimHumongousObjects),
    _humongous_regions_reclaimed(0),
    _bytes_freed(0) { }

  virtual ~EagerlyReclaimHumongousObjectsTask() {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
    g1h->decrement_summary_bytes(_bytes_freed);
  }

  double worker_cost() const override { return 1.0; }
  void do_work(uint worker_id) override {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1FreeHumongousRegionClosure cl;
    g1h->heap_region_iterate(&cl);

    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumTotal, g1h->num_humongous_objects());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumCandidates, g1h->num_humongous_reclaim_candidates());
    record_work_item(worker_id, G1GCPhaseTimes::EagerlyReclaimNumReclaimed, cl.humongous_objects_reclaimed());

    _humongous_regions_reclaimed = cl.humongous_regions_reclaimed();
    _bytes_freed = cl.bytes_freed();
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : public G1AbstractSubTask {
  PreservedMarksSet* _preserved_marks;
  WorkerTask* _task;

public:
  RestorePreservedMarksTask(PreservedMarksSet* preserved_marks) :
    G1AbstractSubTask(G1GCPhaseTimes::RestorePreservedMarks),
    _preserved_marks(preserved_marks),
    _task(preserved_marks->create_task()) { }

  virtual ~RestorePreservedMarksTask() {
    delete _task;
  }

  double worker_cost() const override {
    return _preserved_marks->num();
  }

  void do_work(uint worker_id) override { _task->work(worker_id); }
};

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;
  G1EvacFailureRegions* _evac_failure_regions;

  HeapRegion* region_for_card(CardValue* card_ptr) const {
    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
  }

  bool will_become_free(HeapRegion* hr) const {
    // A region will be freed during the FreeCollectionSet phase if the region is in the
    // collection set and has not had an evacuation failure.
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }

public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
    G1CardTableEntryClosure(),
    _num_dirtied(0),
    _g1h(g1h),
    _g1_ct(g1h->card_table()),
    _evac_failure_regions(evac_failure_regions) { }

  void do_card_ptr(CardValue* card_ptr, uint worker_id) {
    HeapRegion* hr = region_for_card(card_ptr);

    // Should only dirty cards in regions that won't be freed.
    if (!will_become_free(hr)) {
      *card_ptr = G1CardTable::dirty_card_val();
      _num_dirtied++;
    }
  }

  size_t num_dirtied()   const { return _num_dirtied; }
};

class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTask : public G1AbstractSubTask {
  G1EvacFailureRegions* _evac_failure_regions;
  HeapRegionClaimer _claimer;

  class ProcessEvacuationFailedRegionsClosure : public HeapRegionClosure {
  public:

    bool do_heap_region(HeapRegion* r) override {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      G1ConcurrentMark* cm = g1h->concurrent_mark();

      HeapWord* top_at_mark_start = cm->top_at_mark_start(r);
      assert(top_at_mark_start == r->bottom(), "TAMS must not have been set for region %u", r->hrm_index());
      assert(cm->live_bytes(r->hrm_index()) == 0, "Marking live bytes must not be set for region %u", r->hrm_index());

      // Concurrent mark does not mark through regions that we retain (they are root
      // regions wrt marking), so we must clear their mark data (TAMS, bitmap, ...) that
      // was set eagerly or during evacuation failure.
      bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() ||
                             g1h->policy()->should_retain_evac_failed_region(r);

      if (clear_mark_data) {
        g1h->clear_bitmap_for_region(r);
      } else {
        // This evacuation failed region is going to be marked through. Update mark data.
        cm->update_top_at_mark_start(r);
        cm->set_live_bytes(r->hrm_index(), r->live_bytes());
        assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), cm->top_at_mark_start(r)) != cm->top_at_mark_start(r),
               "Marks must be on bitmap for region %u", r->hrm_index());
      }
      return false;
    }
  };

public:
  ProcessEvacuationFailedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::ProcessEvacuationFailedRegions),
    _evac_failure_regions(evac_failure_regions),
    _claimer(0) {
  }

  void set_max_workers(uint max_workers) override {
    _claimer.set_n_workers(max_workers);
  }

  double worker_cost() const override {
    return _evac_failure_regions->num_regions_evac_failed();
  }

  void do_work(uint worker_id) override {
    ProcessEvacuationFailedRegionsClosure cl;
    _evac_failure_regions->par_iterate(&cl, &_claimer, worker_id);
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask : public G1AbstractSubTask {
  BufferNodeList* _rdc_buffers;
  uint _num_buffer_lists;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  RedirtyLoggedCardsTask(G1EvacFailureRegions* evac_failure_regions, BufferNodeList* rdc_buffers, uint num_buffer_lists) :
    G1AbstractSubTask(G1GCPhaseTimes::RedirtyCards),
    _rdc_buffers(rdc_buffers),
    _num_buffer_lists(num_buffer_lists),
    _evac_failure_regions(evac_failure_regions) { }

  double worker_cost() const override {
    // Needs more investigation.
    return G1CollectedHeap::heap()->workers()->active_workers();
  }

  void do_work(uint worker_id) override {
    RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap(), _evac_failure_regions);

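    // Start at the buffer list matching this worker's id so that workers spread out
    // over the lists before contending for the same one.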
    uint start = worker_id;
    for (uint i = 0; i < _num_buffer_lists; i++) {
      uint index = (start + i) % _num_buffer_lists;

      BufferNode* next = Atomic::load(&_rdc_buffers[index]._head);
      BufferNode* tail = Atomic::load(&_rdc_buffers[index]._tail);

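      // Claim the current head node with a CAS; on success process it, otherwise
      // another worker raced us and we move on to the next buffer list.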
      while (next != nullptr) {
        BufferNode* node = next;
        next = Atomic::cmpxchg(&_rdc_buffers[index]._head, node, (node != tail ) ? node->next() : nullptr);
        if (next == node) {
          cl.apply_to_buffer(node, worker_id);
          next = (node != tail ) ? node->next() : nullptr;
        } else {
          break; // If there is contention, move to the next BufferNodeList
        }
      }
    }
    record_work_item(worker_id, 0, cl.num_dirtied());
  }
};

// Helper class to keep statistics for the collection set freeing
class FreeCSetStats {
  size_t _before_used_bytes;   // Usage in regions successfully evacuated
  size_t _after_used_bytes;    // Usage in regions failing evacuation
  size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
  size_t _failure_used_words;  // Live size in failed regions
  size_t _failure_waste_words; // Wasted size in failed regions
  size_t _card_rs_length;      // (Card Set) Remembered set size
  uint _regions_freed;         // Number of regions freed

public:
  FreeCSetStats() :
      _before_used_bytes(0),
      _after_used_bytes(0),
      _bytes_allocated_in_old_since_last_gc(0),
      _failure_used_words(0),
      _failure_waste_words(0),
      _card_rs_length(0),
      _regions_freed(0) { }

  void merge_stats(FreeCSetStats* other) {
    assert(other != nullptr, "invariant");
    _before_used_bytes += other->_before_used_bytes;
    _after_used_bytes += other->_after_used_bytes;
    _bytes_allocated_in_old_since_last_gc += other->_bytes_allocated_in_old_since_last_gc;
    _failure_used_words += other->_failure_used_words;
    _failure_waste_words += other->_failure_waste_words;
    _card_rs_length += other->_card_rs_length;
    _regions_freed += other->_regions_freed;
  }

  void report(G1CollectedHeap* g1h, G1EvacInfo* evacuation_info) {
    evacuation_info->set_regions_freed(_regions_freed);
    evacuation_info->set_collection_set_used_before(_before_used_bytes + _after_used_bytes);
    evacuation_info->increment_collection_set_used_after(_after_used_bytes);

    g1h->decrement_summary_bytes(_before_used_bytes);
    g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);

    G1Policy *policy = g1h->policy();
    policy->old_gen_alloc_tracker()->add_allocated_bytes_since_last_gc(_bytes_allocated_in_old_since_last_gc);
    policy->record_card_rs_length(_card_rs_length);
    policy->cset_regions_freed();
  }

  void account_failed_region(HeapRegion* r) {
    size_t used_words = r->live_bytes() / HeapWordSize;
    _failure_used_words += used_words;
    _failure_waste_words += HeapRegion::GrainWords - used_words;
    _after_used_bytes += r->used();

    // When moving a young gen region to old gen, we "allocate" that whole
    // region there. This is in addition to any already evacuated objects.
    // Notify the policy about that. Old gen regions do not cause an
    // additional allocation: both the objects still in the region and the
    // ones already moved are accounted for elsewhere.
    if (r->is_young()) {
      _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
    }
  }

  void account_evacuated_region(HeapRegion* r) {
    size_t used = r->used();
    assert(used > 0, "region %u %s zero used", r->hrm_index(), r->get_short_type_str());
    _before_used_bytes += used;
    _regions_freed += 1;
  }

  void account_card_rs_length(HeapRegion* r) {
    _card_rs_length += r->rem_set()->occupied();
  }
};

// Closure applied to all regions in the collection set.
class FreeCSetClosure : public HeapRegionClosure {
  // Helper to send JFR events for regions.
  class JFREventForRegion {
    EventGCPhaseParallel _event;

  public:
    JFREventForRegion(HeapRegion* region, uint worker_id) : _event() {
      _event.set_gcId(GCId::current());
      _event.set_gcWorkerId(worker_id);
      if (region->is_young()) {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::YoungFreeCSet));
      } else {
        _event.set_name(G1GCPhaseTimes::phase_name(G1GCPhaseTimes::NonYoungFreeCSet));
      }
    }

    ~JFREventForRegion() {
      _event.commit();
    }
  };

  // Helper to do timing for region work.
  class TimerForRegion {
    Tickspan& _time;
    Ticks     _start_time;
  public:
    TimerForRegion(Tickspan& time) : _time(time), _start_time(Ticks::now()) { }
    ~TimerForRegion() {
      _time += Ticks::now() - _start_time;
    }
  };

  // FreeCSetClosure members
  G1CollectedHeap* _g1h;
  const size_t*    _surviving_young_words;
  uint             _worker_id;
  Tickspan         _young_time;
  Tickspan         _non_young_time;
  FreeCSetStats*   _stats;
  G1EvacFailureRegions* _evac_failure_regions;
  uint             _num_retained_regions;

  void assert_tracks_surviving_words(HeapRegion* r) {
    assert(r->young_index_in_cset() != 0 &&
           (uint)r->young_index_in_cset() <= _g1h->collection_set()->young_region_length(),
           "Young index %u is wrong for region %u of type %s with %u young regions",
           r->young_index_in_cset(), r->hrm_index(), r->get_type_str(), _g1h->collection_set()->young_region_length());
  }

  void handle_evacuated_region(HeapRegion* r) {
    assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
    stats()->account_evacuated_region(r);

    // Free the region and its remembered set.
    _g1h->free_region(r, nullptr);
    _g1h->hr_printer()->cleanup(r);
  }

  void handle_failed_region(HeapRegion* r) {
    // Do some allocation statistics accounting. Regions that failed evacuation
    // are always made old, so there is no need to update anything in the young
    // gen statistics, but we need to update old gen statistics.
    stats()->account_failed_region(r);

    G1GCPhaseTimes* p = _g1h->phase_times();
    assert(r->in_collection_set(), "Failed evacuation of region %u not in collection set", r->hrm_index());

    p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreEvacuationFailedRegions,
                                      _worker_id,
                                      1,
                                      G1GCPhaseTimes::RestoreEvacFailureRegionsEvacFailedNum);

    bool retain_region = _g1h->policy()->should_retain_evac_failed_region(r);
    // Update the region state due to the failed evacuation.
    r->handle_evacuation_failure(retain_region);
    assert(r->is_old(), "must already be relabelled as old");

    if (retain_region) {
      _g1h->retain_region(r);
      _num_retained_regions++;
    }
    assert(retain_region == r->rem_set()->is_tracked(), "When retaining a region, remembered set should be kept.");

    // Add region to old set, need to hold lock.
    MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _g1h->old_set_add(r);
  }

  Tickspan& timer_for_region(HeapRegion* r) {
    return r->is_young() ? _young_time : _non_young_time;
  }

  FreeCSetStats* stats() {
    return _stats;
  }

public:
  FreeCSetClosure(const size_t* surviving_young_words,
                  uint worker_id,
                  FreeCSetStats* stats,
                  G1EvacFailureRegions* evac_failure_regions) :
      HeapRegionClosure(),
      _g1h(G1CollectedHeap::heap()),
      _surviving_young_words(surviving_young_words),
      _worker_id(worker_id),
      _young_time(),
      _non_young_time(),
      _stats(stats),
      _evac_failure_regions(evac_failure_regions),
      _num_retained_regions(0) { }

  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
    JFREventForRegion event(r, _worker_id);
    TimerForRegion timer(timer_for_region(r));

    stats()->account_card_rs_length(r);

    if (r->is_young()) {
      assert_tracks_surviving_words(r);
      r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
    }

    if (_evac_failure_regions->contains(r->hrm_index())) {
      handle_failed_region(r);
    } else {
      handle_evacuated_region(r);
    }
    assert(!_g1h->is_on_master_free_list(r), "sanity");

    return false;
  }

  void report_timing() {
    G1GCPhaseTimes* pt = _g1h->phase_times();
    if (_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, _worker_id, _young_time.seconds());
    }
    if (_non_young_time.value() > 0) {
      pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
    }
  }

  uint num_retained_regions() const { return _num_retained_regions; }
};

class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1AbstractSubTask {
  G1CollectedHeap*  _g1h;
  G1EvacInfo*       _evacuation_info;
  FreeCSetStats*    _worker_stats;
  HeapRegionClaimer _claimer;
  const size_t*     _surviving_young_words;
  uint              _active_workers;
  G1EvacFailureRegions* _evac_failure_regions;
  volatile uint     _num_retained_regions;

  FreeCSetStats* worker_stats(uint worker) {
    return &_worker_stats[worker];
  }

  void report_statistics() {
    // Merge the accounting
    FreeCSetStats total_stats;
    for (uint worker = 0; worker < _active_workers; worker++) {
      total_stats.merge_stats(worker_stats(worker));
    }
    total_stats.report(_g1h, _evacuation_info);
  }

public:
  FreeCollectionSetTask(G1EvacInfo* evacuation_info,
                        const size_t* surviving_young_words,
                        G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::FreeCollectionSet),
    _g1h(G1CollectedHeap::heap()),
    _evacuation_info(evacuation_info),
    _worker_stats(nullptr),
    _claimer(0),
    _surviving_young_words(surviving_young_words),
    _active_workers(0),
    _evac_failure_regions(evac_failure_regions),
    _num_retained_regions(0) {

    _g1h->clear_eden();
  }

  virtual ~FreeCollectionSetTask() {
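    // The destructor runs single-threaded after all workers have finished; its
    // duration is reported below as the serial free collection set time.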
    Ticks serial_time = Ticks::now();

    bool has_new_retained_regions = Atomic::load(&_num_retained_regions) != 0;
    if (has_new_retained_regions) {
      G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
      candidates->sort_by_efficiency();
    }

    report_statistics();
    for (uint worker = 0; worker < _active_workers; worker++) {
      _worker_stats[worker].~FreeCSetStats();
    }
    FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);

    G1GCPhaseTimes* p = _g1h->phase_times();
    p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);

    _g1h->clear_collection_set();
  }

  double worker_cost() const override { return G1CollectedHeap::heap()->collection_set()->region_length(); }

  void set_max_workers(uint max_workers) override {
    _active_workers = max_workers;
    _worker_stats = NEW_C_HEAP_ARRAY(FreeCSetStats, max_workers, mtGC);
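    // Construct the per-worker stats in place; they are destroyed explicitly in the
    // destructor before the backing array is freed.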
    for (uint worker = 0; worker < _active_workers; worker++) {
      ::new (&_worker_stats[worker]) FreeCSetStats();
    }
    _claimer.set_n_workers(_active_workers);
  }

  void do_work(uint worker_id) override {
    FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id), _evac_failure_regions);
    _g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
    // Report per-region type timings.
    cl.report_timing();

    Atomic::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed);
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::ResizeTLABsTask : public G1AbstractSubTask {
  G1JavaThreadsListClaimer _claimer;

  // There is not much work per thread so the number of threads per worker is high.
  static const uint ThreadsPerWorker = 250;

public:
  ResizeTLABsTask() : G1AbstractSubTask(G1GCPhaseTimes::ResizeThreadLABs), _claimer(ThreadsPerWorker) { }

  void do_work(uint worker_id) override {
    class ResizeClosure : public ThreadClosure {
    public:

      void do_thread(Thread* thread) {
        static_cast<JavaThread*>(thread)->tlab().resize();
      }
    } cl;
    _claimer.apply(&cl);
  }

  double worker_cost() const override {
    return (double)_claimer.length() / ThreadsPerWorker;
  }
};

G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
                                                                                 G1EvacInfo* evacuation_info,
                                                                                 G1EvacFailureRegions* evac_failure_regions) :
  G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
#if COMPILER2_OR_JVMCI
  add_serial_task(new UpdateDerivedPointersTask());
#endif
  if (G1CollectedHeap::heap()->has_humongous_reclaim_candidates()) {
    add_serial_task(new EagerlyReclaimHumongousObjectsTask());
  }

  if (evac_failure_regions->has_regions_evac_failed()) {
    add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
    add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
  }
  add_parallel_task(new RedirtyLoggedCardsTask(evac_failure_regions,
                                               per_thread_states->rdc_buffers(),
                                               per_thread_states->num_workers()));

  if (UseTLAB && ResizeTLAB) {
    add_parallel_task(new ResizeTLABsTask());
  }
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}