/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/space.hpp"

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
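
// Note: the offset table update above is what lets block_start_const() below
// resolve block starts through _offsets rather than by scanning the space.
// ContiguousSpace::allocate asserts that the caller either holds the heap
// lock or is the VM thread at a safepoint (see the comment on par_allocate
// below), so this path needs no additional synchronization.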

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future we can find some lock-free way to keep
  // the offset table and the allocations coordinated.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
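
// Illustrative caller pattern (a sketch only, not part of this header): a
// LAB-sized refill would look roughly like
//
//   HeapWord* buf = space->par_allocate(lab_word_size);
//   if (buf == NULL) {
//     // retire the LAB and fall back to a slower allocation path
//   }
//
// Small per-object allocations should instead be served from a thread-local
// buffer, since every call here serializes on _par_alloc_lock.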

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return cast_to_oop(addr)->size();
}

#if INCLUDE_SERIALGC

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      obj->set_mark(obj->mark().set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }

};
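
// Worked example (numbers purely illustrative): with a 512 MB space and an
// allowed_dead_ratio() of 5, the constructor above grants a budget of
// 512 MB * 5 / 100 / HeapWordSize words, i.e. dead runs adding up to roughly
// 25 MB may be turned into filler objects and left in place rather than
// compacted over. Once a dead run exceeds the remaining budget, the spacer
// deactivates itself and every later dead run is reclaimed by compaction.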

template <bool ALT_FWD, class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the mark
  // word of each object. Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord* end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
  HeapWord* first_dead = NULL;              // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

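  // Walk every block in [bottom, scan_limit). Live objects are forwarded to
  // compact_top in the current compaction space; a run of dead objects is
  // either turned into a filler "live" object by the DeadSpacer, or has a
  // pointer to the next live object written into its first word so that
  // later phases can skip over it.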
  while (cur_obj < scan_limit) {
    if (space->scanned_block_is_obj(cur_obj) && cast_to_oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward<ALT_FWD>(cast_to_oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !cast_to_oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = cast_to_oop(cur_obj);
        compact_top = cp->space->forward<ALT_FWD>(obj, obj->size(), cp, compact_top);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
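
// On return from scan_and_forward, the space records _end_of_live (the
// address just past the last live object) and _first_dead (the start of the
// first dead run, or _end_of_live if there is none), and the destination
// space's compaction_top has been advanced past all forwarded objects.
// scan_and_adjust_pointers and scan_and_compact below rely on exactly these
// fields.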

template <bool ALT_FWD, class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
    if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers<ALT_FWD>(cast_to_oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object, instead it points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* prev_obj = NULL;

    while (cur_obj < space->_first_dead) {
      size_t size = space->obj_size(cur_obj);
      assert(!cast_to_oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_obj = cur_obj;
      cur_obj += size;
    }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes. For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized. Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

template <bool ALT_FWD, class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const bottom = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && (bottom == end_of_live || !cast_to_oop(bottom)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
  HeapWord* cur_obj = bottom;
  if (space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!cast_to_oop(cur_obj)->is_gc_marked()) {
      debug_only(prev_obj = cur_obj);
      // The first word of the dead object contains a pointer to the next live object or end of space.
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(cast_to_oop(cur_obj)));

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      cast_to_oop(compaction_top)->init_mark();
      assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}
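
// Taken together, the three templates above implement the sliding phases of a
// mark-compact collection as indicated by their comments: phase 2 computes
// forwarding addresses (scan_and_forward), phase 3 rewrites interior oops to
// the forwarded locations (scan_and_adjust_pointers), and phase 4 slides the
// objects into place and reinitializes their mark words (scan_and_compact).
// They are expected to run in that order, over the same marking information.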

#endif // INCLUDE_SERIALGC

size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return cast_to_oop(addr)->size();
}

template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
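  // Objects may be allocated in this space while the closure runs (e.g. by
  // the closure itself), advancing top(); the outer loop therefore re-samples
  // top() and keeps iterating until no objects remain between the scan point
  // and the current top.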
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = cast_to_oop(p);
      p += m->oop_iterate_size(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}

#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP