/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCCompactionPoint.hpp"
#include "gc/g1/g1FullGCMarker.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"

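// Free a pinned region in which no objects are live. The region is returned
// to the free list (using the humongous variant when is_humongous is true),
// prepared for compaction as an empty region, and its index is marked
// invalid in the collector.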
template<bool is_humongous>
void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
  _regions_freed = true;
  if (is_humongous) {
    _g1h->free_humongous_region(hr, nullptr);
  } else {
    _g1h->free_region(hr, nullptr);
  }
  prepare_for_compaction(hr);
  _collector->set_invalid(hr->hrm_index());
}

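// Decide, per region, how it takes part in the compaction: non-pinned regions
// at or below the live-word threshold are prepared for compaction, dead pinned
// regions are freed, and the remaining (pinned or high live ratio) regions are
// left in place. Region metadata that is not valid after a full GC is reset in
// all cases.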
bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
  if (should_compact(hr)) {
    assert(!hr->is_humongous(), "moving humongous objects not supported.");
    prepare_for_compaction(hr);
  } else {
    // There is no need to iterate and forward objects in pinned regions, i.e.
    // to prepare them for compaction. The adjust pointers phase will skip
    // work for them.
    assert(hr->containing_set() == nullptr, "already cleared by PrepareRegionsClosure");
    if (hr->is_humongous()) {
      oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
      if (!_bitmap->is_marked(obj)) {
        free_pinned_region<true>(hr);
      }
    } else if (hr->is_open_archive()) {
      bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
      if (is_empty) {
        free_pinned_region<false>(hr);
      }
    } else if (hr->is_closed_archive()) {
      // Nothing to do for closed archive regions.
    } else {
      assert(MarkSweepDeadRatio > 0,
             "only skip compaction for other regions when MarkSweepDeadRatio > 0");

      // Too many live objects in this region; skip compacting it.
      _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
      if (hr->is_young()) {
        // G1 updates the BOT for old region contents incrementally, but young regions
        // lack BOT information for performance reasons.
        // Recreate the BOT information for young regions with a high live ratio here so
        // that scanning their card tables in later collection pauses performs as expected.
        hr->update_bot();
      }
      log_trace(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
                            hr->hrm_index(), _collector->live_words(hr->hrm_index()));
    }
  }

  // Reset data structures not valid after Full GC.
  reset_region_metadata(hr);

  return false;
}

G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
    G1FullGCTask("G1 Prepare Compact Task", collector),
    _freed_regions(false),
    _hrclaimer(collector->workers()) {
}

void G1FullGCPrepareTask::set_freed_regions() {
  if (!_freed_regions) {
    _freed_regions = true;
  }
}

bool G1FullGCPrepareTask::has_freed_regions() {
  return _freed_regions;
}

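// Per-worker entry point: iterate over a claimed subset of the heap regions,
// calculate forwarding information, update this worker's compaction point and
// record whether any regions were freed.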
void G1FullGCPrepareTask::work(uint worker_id) {
  Ticks start = Ticks::now();
  G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
  G1CalculatePointersClosure closure(collector(), compaction_point);
  G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);

  compaction_point->update();

  // Check if any regions were freed by this worker and store in task.
  if (closure.freed_regions()) {
    set_freed_regions();
  }
  log_task("Prepare compaction task", worker_id, start);
}

G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1FullCollector* collector,
                                                                            G1FullGCCompactionPoint* cp) :
    _g1h(G1CollectedHeap::heap()),
    _collector(collector),
    _bitmap(collector->mark_bitmap()),
    _cp(cp),
    _regions_freed(false) { }

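// A region is compacted only if it is not pinned and its number of live words
// does not exceed the region compaction threshold (see MarkSweepDeadRatio);
// regions with a higher live ratio are skipped.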
bool G1FullGCPrepareTask::G1CalculatePointersClosure::should_compact(HeapRegion* hr) {
  if (hr->is_pinned()) {
    return false;
  }
  size_t live_words = _collector->live_words(hr->hrm_index());
  size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
  // Regions with a high live ratio will not be compacted.
  return live_words <= live_words_threshold;
}

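// Clear per-region metadata that is not valid after a full GC: the remembered
// set, the card table entries for the region and, if in use, the hot card
// cache counts.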
void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
  hr->rem_set()->clear();
  hr->clear_cardtable();

  G1HotCardCache* hcc = _g1h->hot_card_cache();
  if (hcc->use_cache()) {
    hcc->reset_card_counts(hr);
  }
}

G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
    _cp(cp) { }

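// Applied to every live object in a region selected for compaction: forward
// the object via the compaction point and return its size so the iteration
// can advance to the next marked object.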
size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
  size_t size = object->size();
  _cp->forward(object, size);
  return size;
}

size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
  // We only re-prepare objects forwarded within the current region, so
  // skip objects that are already forwarded to another region.
  oop forwarded_to = obj->forwardee();
  if (forwarded_to != nullptr && !_current->is_in(forwarded_to)) {
    return obj->size();
  }

  // Get size and forward.
  size_t size = obj->size();
  _cp->forward(obj, size);

  return size;
}

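// Reset the region's compaction top and forward all marked objects in it to
// their new locations using the given compaction point.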
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
                                                                                  HeapRegion* hr) {
  G1PrepareCompactLiveClosure prepare_compact(cp);
  hr->set_compaction_top(hr->bottom());
  hr->apply_to_marked_objects(_bitmap, &prepare_compact);
}

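// Lazily initialize this worker's compaction point with the first region it
// encounters, then add the region to the compaction queue and forward its
// live objects.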
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
  if (!_cp->is_initialized()) {
    hr->set_compaction_top(hr->bottom());
    _cp->initialize(hr, true);
  }
  // Add region to the compaction queue and prepare it.
  _cp->add(hr);
  prepare_for_compaction_work(_cp, hr);
}

void G1FullGCPrepareTask::prepare_serial_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
  // At this point we know that no regions were completely freed by
  // the parallel compaction. That means that the last region of each
  // compaction queue still has data in it. We try to compact
  // these regions in serial to avoid a premature OOM.
  for (uint i = 0; i < collector()->workers(); i++) {
    G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
    if (cp->has_regions()) {
      collector()->serial_compaction_point()->add(cp->remove_last());
    }
  }

  // Update the forwarding information for the regions in the serial
  // compaction point.
  G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
    HeapRegion* current = *it;
    if (!cp->is_initialized()) {
      // Initialize the compaction point. Nothing more is needed for the first heap region
      // since it is already prepared for compaction.
      cp->initialize(current, false);
    } else {
      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
      G1RePrepareClosure re_prepare(cp, current);
      current->set_compaction_top(current->bottom());
      current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
    }
  }
  cp->update();
}

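// Returns true if this closure freed a pinned region, or if compaction left at
// least one region in this worker's queue completely empty (the current
// compaction target is not the last region in the queue).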
bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
  if (_regions_freed) {
    return true;
  }

  if (!_cp->has_regions()) {
    // No regions in queue, so no free ones either.
    return false;
  }

  if (_cp->current_region() != _cp->regions()->last()) {
    // The current region used for compaction is not the last in the
    // queue. That means there is at least one free region in the queue.
    return true;
  }

  // No free regions in the queue.
  return false;
}