/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/space.hpp"

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif

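// Non-const convenience wrapper; forwards to the const implementation
// provided by the concrete space type.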
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, this is
// best used for larger LAB allocations only.
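// An illustrative call site (a sketch only; real callers live in the
// generations' allocation paths, and "space" is a placeholder for an
// OffsetTableContigSpace* the caller already holds):
//
//   HeapWord* mem = space->par_allocate(word_size);
//   if (mem == NULL) {
//     // The space is full; the caller must expand it or trigger a GC.
//   }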
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future we can find a lock-free way to keep
  // the two in sync.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return cast_to_oop(addr)->size();
}

#if INCLUDE_SERIALGC

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }
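
  // Worked example (illustrative numbers only): with a 64M space,
  // allowed_dead_ratio() == 5 and MarkSweepAlwaysCompactCount == 4,
  // three out of every four mark-sweep invocations tolerate up to
  // 64M * 5 / 100 = 3.2M of dead space (kept here as a word count),
  // while every fourth invocation disables the DeadSpacer and compacts fully.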

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      obj->set_mark(obj->mark().set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
          p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and record them in the
  // forwarding table.  Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord*  end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
  HeapWord*  first_dead = NULL; // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

  SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();
  while (cur_obj < scan_limit) {
    if (space->scanned_block_is_obj(cur_obj) && cast_to_oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(cast_to_oop(cur_obj), size, cp, compact_top, forwarding);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !cast_to_oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = cast_to_oop(cur_obj);
        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top, forwarding);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}
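
// On return from scan_and_forward, the space looks roughly like this
// (L == live or filler, D == dead):
//
//   bottom      _first_dead                _end_of_live     scan_limit
//     |LLLLLLLLL|DDDD|LLLLL|DD|LLLLLLLLLLLL|DDDDDDDDDDDDDDDD|
//
// The first word of each dead run below _end_of_live holds the address of
// the next live object, so the later phases can skip a whole dead run in a
// single step.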

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".
  const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
    if (cur_obj < first_dead || cast_to_oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(forwarding, cast_to_oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; instead its first word points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
     // we have a chunk of the space which hasn't moved and we've reinitialized
     // the mark word during the previous pass, so we can't use is_gc_marked for
     // the traversal.
     HeapWord* prev_obj = NULL;

     while (cur_obj < space->_first_dead) {
       size_t size = space->obj_size(cur_obj);
       assert(!cast_to_oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
       prev_obj = cur_obj;
       cur_obj += size;
     }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const bottom = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && (bottom == end_of_live || !cast_to_oop(bottom)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
  HeapWord* cur_obj = bottom;
  if (space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  const SlidingForwarding* const forwarding = GenCollectedHeap::heap()->forwarding();

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!cast_to_oop(cur_obj)->is_gc_marked()) {
      debug_only(prev_obj = cur_obj);
      // The first word of the dead object contains a pointer to the next live object or end of space.
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = cast_from_oop<HeapWord*>(forwarding->forwardee(cast_to_oop(cur_obj)));

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      cast_to_oop(compaction_top)->init_mark();
      assert(cast_to_oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}
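
// Taken together, these templates implement the sliding phases of the
// serial mark-compact collector:
//   scan_and_forward         (phase 2) - computes and records forwarding addresses
//   scan_and_adjust_pointers (phase 3) - rewrites interior oops to the new locations
//   scan_and_compact         (phase 4) - slides live objects into place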

#endif // INCLUDE_SERIALGC

size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return cast_to_oop(addr)->size();
}

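// Apply "blk" to every object allocated since the mark was last saved.
// The outer do-while re-reads top() because applying the closure may itself
// allocate into this space (e.g. when promotion during a young collection
// copies more objects in); iteration continues until no new objects appear
// above the scan point.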
template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = cast_to_oop(p);
      p += m->oop_iterate_size(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}

#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP