1 /*
  2  * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.inline.hpp"
 29 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 30 #include "gc/g1/g1FullGCMarker.hpp"
 31 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 32 #include "gc/g1/g1FullGCPrepareTask.hpp"
 33 #include "gc/g1/g1HotCardCache.hpp"
 34 #include "gc/g1/heapRegion.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/referenceProcessor.hpp"
 37 #include "gc/shared/slidingForwarding.inline.hpp"
 38 #include "logging/log.hpp"
 39 #include "memory/iterator.inline.hpp"
 40 #include "oops/oop.inline.hpp"
 41 #include "utilities/ticks.hpp"
 42 
 43 template<bool is_humongous>
 44 void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
 45   _regions_freed = true;
 46   if (is_humongous) {
 47     _g1h->free_humongous_region(hr, nullptr);
 48   } else {
 49     _g1h->free_region(hr, nullptr);
 50   }
 51   _collector->set_free(hr->hrm_index());
 52   prepare_for_compaction(hr);
 53 }
 54 
 55 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
 56   bool force_not_compacted = false;
 57   if (should_compact(hr)) {
 58     assert(!hr->is_humongous(), "moving humongous objects not supported.");
 59     prepare_for_compaction(hr);
 60   } else {
 61     // There is no need to iterate and forward objects in pinned regions ie.
 62     // prepare them for compaction. The adjust pointers phase will skip
 63     // work for them.
 64     assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
 65     if (hr->is_humongous()) {
 66       oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
 67       if (!_bitmap->is_marked(obj)) {
 68         free_pinned_region<true>(hr);
 69       }
 70     } else if (hr->is_open_archive()) {
 71       bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
 72       if (is_empty) {
 73         free_pinned_region<false>(hr);
 74       }
 75     } else if (hr->is_closed_archive()) {
 76       // nothing to do with closed archive region
 77     } else {
 78       assert(MarkSweepDeadRatio > 0,
 79              "only skip compaction for other regions when MarkSweepDeadRatio > 0");
 80 
 81       // Too many live objects; skip compacting it.
 82       _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
 83       if (hr->is_young()) {
 84         // G1 updates the BOT for old region contents incrementally, but young regions
 85         // lack BOT information for performance reasons.
 86         // Recreate BOT information of high live ratio young regions here to keep expected
 87         // performance during scanning their card tables in the collection pauses later.
 88         hr->update_bot();
 89       }
 90       log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
 91                             hr->hrm_index(), _collector->live_words(hr->hrm_index()));
 92     }
 93   }
 94 
 95   // Reset data structures not valid after Full GC.
 96   reset_region_metadata(hr);
 97 
 98   return false;
 99 }
100 
// Construct the parallel prepare task. The region claimer is sized for the
// collector's worker count so regions can be partitioned among workers; no
// regions have been freed yet.
G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
    G1FullGCTask("G1 Prepare Compact Task", collector),
    _freed_regions(false),
    _hrclaimer(collector->workers()) {
}
106 
107 void G1FullGCPrepareTask::set_freed_regions() {
108   if (!_freed_regions) {
109     _freed_regions = true;
110   }
111 }
112 
// Returns whether any worker freed a region during the prepare phase.
bool G1FullGCPrepareTask::has_freed_regions() {
  return _freed_regions;
}
116 
117 void G1FullGCPrepareTask::work(uint worker_id) {
118   Ticks start = Ticks::now();
119   G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
120   G1CalculatePointersClosure closure(collector(), compaction_point);
121   G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);
122 
123   compaction_point->update();
124 
125   // Check if any regions was freed by this worker and store in task.
126   if (closure.freed_regions()) {
127     set_freed_regions();
128   }
129   log_task("Prepare compaction task", worker_id, start);
130 }
131 
// Cache the heap, collector, mark bitmap and this worker's compaction point.
// No regions have been freed by this closure yet.
G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1FullCollector* collector,
                                                                            G1FullGCCompactionPoint* cp) :
    _g1h(G1CollectedHeap::heap()),
    _collector(collector),
    _bitmap(collector->mark_bitmap()),
    _cp(cp),
    _regions_freed(false) { }
139 
140 bool G1FullGCPrepareTask::G1CalculatePointersClosure::should_compact(HeapRegion* hr) {
141   if (hr->is_pinned()) {
142     return false;
143   }
144   size_t live_words = _collector->live_words(hr->hrm_index());
145   size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
146   // High live ratio region will not be compacted.
147   return live_words <= live_words_threshold;
148 }
149 
150 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
151   hr->rem_set()->clear();
152   hr->clear_cardtable();
153 
154   G1HotCardCache* hcc = _g1h->hot_card_cache();
155   if (hcc->use_cache()) {
156     hcc->reset_card_counts(hr);
157   }
158 }
159 
// Capture the compaction point and the heap-wide forwarding table used to
// record new object locations.
G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
    _cp(cp), _forwarding(G1CollectedHeap::heap()->forwarding()) { }
162 
163 size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
164   size_t size = object->size();
165   _cp->forward(_forwarding, object, size);
166   return size;
167 }
168 
169 size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
170   // We only re-prepare objects forwarded within the current region, so
171   // skip objects that are already forwarded to another region.
172   if (obj->is_forwarded()) {
173     oop forwarded_to = _forwarding->forwardee(obj);
174     if (!_current->is_in(forwarded_to)) {
175       return obj->size();
176     }
177   }
178 
179   // Get size and forward.
180   size_t size = obj->size();
181   _cp->forward(_forwarding, obj, size);
182 
183   return size;
184 }
185 
186 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
187                                                                                   HeapRegion* hr) {
188   hr->set_compaction_top(hr->bottom());
189   if (!_collector->is_free(hr->hrm_index())) {
190     G1PrepareCompactLiveClosure prepare_compact(cp);
191     hr->apply_to_marked_objects(_bitmap, &prepare_compact);
192   }
193 }
194 
// Queue a region on this worker's compaction point and forward its objects.
// The first region a worker handles also initializes the compaction point;
// its compaction top is reset before initialization so compaction starts at
// the region bottom.
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
  if (!_cp->is_initialized()) {
    hr->set_compaction_top(hr->bottom());
    _cp->initialize(hr, true);
  }
  // Add region to the compaction queue and prepare it.
  _cp->add(hr);
  prepare_for_compaction_work(_cp, hr);
}
204 
// Serial fallback used when the parallel prepare phase freed no regions:
// pull the last (tail) region off each worker's compaction queue and
// re-forward their objects through a single serial compaction point, so the
// partially-filled tail regions are packed together instead of each keeping
// its own slack, which could otherwise cause a premature OOM.
void G1FullGCPrepareTask::prepare_serial_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
  // At this point we know that no regions were completely freed by
  // the parallel compaction. That means that the last region of
  // all compaction queues still have data in them. We try to compact
  // these regions in serial to avoid a premature OOM.
  for (uint i = 0; i < collector()->workers(); i++) {
    G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
    if (cp->has_regions()) {
      // Move each worker's tail region into the serial queue.
      collector()->serial_compaction_point()->add(cp->remove_last());
    }
  }

  // Update the forwarding information for the regions in the serial
  // compaction point.
  G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
    HeapRegion* current = *it;
    if (!cp->is_initialized()) {
      // Initialize the compaction point. Nothing more is needed for the first heap region
      // since it is already prepared for compaction.
      cp->initialize(current, false);
    } else {
      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
      // Re-forward this region's objects into the serial compaction point,
      // resetting its compaction top first.
      G1RePrepareClosure re_prepare(cp, current);
      current->set_compaction_top(current->bottom());
      current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
    }
  }
  cp->update();
}
236 
237 bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
238   if (_regions_freed) {
239     return true;
240   }
241 
242   if (!_cp->has_regions()) {
243     // No regions in queue, so no free ones either.
244     return false;
245   }
246 
247   if (_cp->current_region() != _cp->regions()->last()) {
248     // The current region used for compaction is not the last in the
249     // queue. That means there is at least one free region in the queue.
250     return true;
251   }
252 
253   // No free regions in the queue.
254   return false;
255 }