/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"

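// Free a pinned region (humongous or regular) whose contents turned out to be
// dead: record that a region was freed, hand the region back to the heap, make
// the now empty region available as a compaction target and mark its index as
// invalid in the collector.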
template<bool is_humongous>
void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
  _regions_freed = true;
  if (is_humongous) {
    _g1h->free_humongous_region(hr, nullptr);
  } else {
    _g1h->free_region(hr, nullptr);
  }
  prepare_for_compaction(hr);
  _collector->set_invalid(hr->hrm_index());
}

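// Visit a single region during the prepare phase: regions selected for
// compaction have their live objects forwarded; pinned regions (humongous,
// open or closed archive) are either freed when completely dead or left in
// place; remaining regions with too many live objects are switched to
// skip-compacting. Region metadata that is invalid after a full GC is reset
// in all cases.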
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
  bool force_not_compacted = false;
  if (should_compact(hr)) {
    assert(!hr->is_humongous(), "moving humongous objects not supported.");
    prepare_for_compaction(hr);
  } else {
    // There is no need to iterate over and forward objects in pinned regions,
    // i.e. to prepare them for compaction. The adjust pointers phase will
    // skip work for them.
    assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
    if (hr->is_humongous()) {
      oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
      if (!_bitmap->is_marked(obj)) {
        free_pinned_region<true>(hr);
      }
    } else if (hr->is_open_archive()) {
      bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
      if (is_empty) {
        free_pinned_region<false>(hr);
      }
    } else if (hr->is_closed_archive()) {
      // Nothing to do for closed archive regions.
    } else {
      assert(MarkSweepDeadRatio > 0,
             "only skip compaction for other regions when MarkSweepDeadRatio > 0");

      // Too many live objects; skip compacting it.
      _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
      if (hr->is_young()) {
        // G1 updates the BOT for old region contents incrementally, but young
        // regions lack BOT information for performance reasons.
        // Recreate the BOT information for young regions with a high live ratio
        // here to keep the expected performance when scanning their card tables
        // in later collection pauses.
        hr->update_bot();
      }
      log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
                            hr->hrm_index(), _collector->live_words(hr->hrm_index()));
    }
  }

  // Reset data structures not valid after Full GC.
  reset_region_metadata(hr);

  return false;
}

G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
    G1FullGCTask("G1 Prepare Compact Task", collector),
    _freed_regions(false),
    _hrclaimer(collector->workers()) {
}

void G1FullGCPrepareTask::set_freed_regions() {
  if (!_freed_regions) {
    _freed_regions = true;
  }
}

bool G1FullGCPrepareTask::has_freed_regions() {
  return _freed_regions;
}

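// Per-worker entry point: claim heap regions and run G1CalculatePointersClosure
// over them with this worker's compaction point, then record whether the worker
// freed any regions and log the time spent.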
void G1FullGCPrepareTask::work(uint worker_id) {
  Ticks start = Ticks::now();
  G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
  G1CalculatePointersClosure closure(collector(), compaction_point);
  G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);

  compaction_point->update();

  // Check if any regions were freed by this worker and record it in the task.
  if (closure.freed_regions()) {
    set_freed_regions();
  }
  log_task("Prepare compaction task", worker_id, start);
}

G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1FullCollector* collector,
                                                                            G1FullGCCompactionPoint* cp) :
    _g1h(G1CollectedHeap::heap()),
    _collector(collector),
    _bitmap(collector->mark_bitmap()),
    _cp(cp),
    _regions_freed(false) { }

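// A region is compacted only if it is not pinned and its live data does not
// exceed the per-region compaction threshold.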
bool G1FullGCPrepareTask::G1CalculatePointersClosure::should_compact(HeapRegion* hr) {
  if (hr->is_pinned()) {
    return false;
  }
  size_t live_words = _collector->live_words(hr->hrm_index());
  size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
  // Regions with a high live ratio are not compacted.
  return live_words <= live_words_threshold;
}

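// Clear per-region data structures whose contents are not valid after a full GC:
// the remembered set, the card table and, if in use, the hot card cache counts.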
void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
  hr->rem_set()->clear();
  hr->clear_cardtable();

  G1HotCardCache* hcc = _g1h->hot_card_cache();
  if (hcc->use_cache()) {
    hcc->reset_card_counts(hr);
  }
}

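// Closure applied to every marked object in a region selected for compaction:
// forward the object to its new location via the compaction point and return
// its size.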
template <bool ALT_FWD>
G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
    _cp(cp) { }

template <bool ALT_FWD>
size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::apply(oop object) {
  size_t size = object->size();
  _cp->forward<ALT_FWD>(object, size);
  return size;
}

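// Closure used when re-preparing regions for serial compaction: objects that
// were forwarded within the current region are forwarded again into the serial
// compaction point, while objects already forwarded to another region keep
// their existing forwarding.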
template <bool ALT_FWD>
size_t G1FullGCPrepareTask::G1RePrepareClosure<ALT_FWD>::apply(oop obj) {
  // We only re-prepare objects forwarded within the current region, so
  // skip objects that are already forwarded to another region.
  if (SlidingForwarding::is_forwarded(obj)) {
    oop forwarded_to = SlidingForwarding::forwardee<ALT_FWD>(obj);
    assert(forwarded_to != nullptr, "must have forwardee");
    if (!_current->is_in(forwarded_to)) {
      return obj->size();
    }
  }
  // Get size and forward.
  size_t size = obj->size();
  _cp->forward<ALT_FWD>(obj, size);

  return size;
}

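// Forward all marked objects in the given region, dispatching on the forwarding
// implementation selected by UseAltGCForwarding. The region's compaction top is
// reset before its live objects are visited.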
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
                                                                                  HeapRegion* hr) {
  if (UseAltGCForwarding) {
    G1PrepareCompactLiveClosure<true> prepare_compact(cp);
    hr->set_compaction_top(hr->bottom());
    hr->apply_to_marked_objects(_bitmap, &prepare_compact);
  } else {
    G1PrepareCompactLiveClosure<false> prepare_compact(cp);
    hr->set_compaction_top(hr->bottom());
    hr->apply_to_marked_objects(_bitmap, &prepare_compact);
  }
}

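// Lazily initialize this worker's compaction point with the first region it
// prepares, then add the region to the compaction queue and forward its live
// objects.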
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
  if (!_cp->is_initialized()) {
    hr->set_compaction_top(hr->bottom());
    _cp->initialize(hr, true);
  }
  // Add region to the compaction queue and prepare it.
  _cp->add(hr);
  prepare_for_compaction_work(_cp, hr);
}

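// Move the last region of each worker's compaction queue to the serial
// compaction point and recompute the forwarding information for those regions
// serially, to avoid a premature OOM when the parallel compaction did not free
// any regions.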
template <bool ALT_FWD>
void G1FullGCPrepareTask::prepare_serial_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
  // At this point we know that no regions were completely freed by
  // the parallel compaction. That means that the last region of
  // each compaction queue still has data in it. We try to compact
  // these regions in serial to avoid a premature OOM.
  for (uint i = 0; i < collector()->workers(); i++) {
    G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
    if (cp->has_regions()) {
      collector()->serial_compaction_point()->add(cp->remove_last());
    }
  }

  // Update the forwarding information for the regions in the serial
  // compaction point.
  G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
    HeapRegion* current = *it;
    if (!cp->is_initialized()) {
      // Initialize the compaction point. Nothing more is needed for the first heap region
      // since it is already prepared for compaction.
      cp->initialize(current, false);
    } else {
      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
      G1RePrepareClosure<ALT_FWD> re_prepare(cp, current);
      current->set_compaction_top(current->bottom());
      current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
    }
  }
  cp->update();
}

void G1FullGCPrepareTask::prepare_serial_compaction() {
  if (UseAltGCForwarding) {
    prepare_serial_compaction_impl<true>();
  } else {
    prepare_serial_compaction_impl<false>();
  }
}

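// Returns true if this closure freed a region directly, or if the compaction
// queue still contains a completely free region past the current compaction
// target.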
bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
  if (_regions_freed) {
    return true;
  }

  if (!_cp->has_regions()) {
    // No regions in queue, so no free ones either.
    return false;
  }

  if (_cp->current_region() != _cp->regions()->last()) {
    // The current region used for compaction is not the last in the
    // queue. That means there is at least one free region in the queue.
    return true;
  }

  // No free regions in the queue.
  return false;
}