/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/space.hpp"

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because "_offsets" must be kept in sync with allocations, we serialize
// parallel allocations with a lock.  This path is therefore best used for
// larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // Given the lock above, this ought to simply call "allocate", but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future some lock-free way of keeping the
  // offset table coordinated could be used instead.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
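
// A minimal usage sketch (illustrative only, not part of the HotSpot
// interface): the helper name and the is_parallel flag below are assumptions
// made for illustration.  It shows how a caller might route requests through
// the locked par_allocate() path when several threads may allocate
// concurrently, and through allocate() when allocation is already serialized
// (heap lock held, or VM thread at a safepoint).
inline HeapWord* example_otcs_allocate(OffsetTableContigSpace* space,
                                       size_t word_size,
                                       bool is_parallel) {
  // Both paths record the new block in the space's offset table; only the
  // parallel path takes _par_alloc_lock.
  return is_parallel ? space->par_allocate(word_size)
                     : space->allocate(word_size);
}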

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

#if INCLUDE_SERIALGC

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }


  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
          p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }

};
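
// Illustrative sketch only (the real forwarding loop lives in the serial
// mark-compact code; find_next_marked and scan_limit below are hypothetical
// names): a forward pass might consult a DeadSpacer when it reaches a run of
// unmarked words.  If the run fits the remaining budget, insert_deadspace()
// overwrites it with a marked filler object, so later passes treat it as live
// and the objects before it keep their addresses:
//
//   DeadSpacer dead_spacer(space);
//   while (cur_obj < scan_limit) {
//     if (cast_to_oop(cur_obj)->is_gc_marked()) {
//       // forward the live object, then advance
//       cur_obj += cast_to_oop(cur_obj)->size();
//     } else {
//       HeapWord* dead_end = find_next_marked(cur_obj, scan_limit);
//       if (!dead_spacer.insert_deadspace(cur_obj, dead_end)) {
//         // budget exhausted: this dead run gets compacted away instead
//       }
//       cur_obj = dead_end;
//     }
//   }
//
// For a feel of the budget: assuming a 100 MB space, allowed_dead_ratio() of 5
// and 8-byte heap words, _allowed_deadspace_words is (100 MB * 5 / 100) / 8,
// i.e. about 655360 words (5 MB) of garbage may be left in place.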

#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
     // we have a chunk of the space which hasn't moved and we've reinitialized
     // the mark word during the previous pass, so we can't use is_gc_marked for
     // the traversal.
     HeapWord* prev_obj = NULL;

     while (cur_obj < space->_first_dead) {
       size_t size = cast_to_oop(cur_obj)->size();
       assert(!cast_to_oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
       prev_obj = cur_obj;
       cur_obj += size;
     }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}
#endif // INCLUDE_SERIALGC

template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = cast_to_oop(p);
      p += m->oop_iterate_size(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}
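
// Illustrative usage sketch (MyClosure is a hypothetical OopIterateClosure
// subclass; the real callers live in the serial generation code): the space's
// saved mark records an earlier top(), and a later call visits exactly the
// objects allocated since then.
//
//   // The saved mark was previously set to top(), e.g. when the enclosing
//   // generation performed save_marks().
//   MyClosure cl;
//   space->oop_since_save_marks_iterate(&cl);   // scans [saved mark, top())
//
// The outer do-while above re-reads top() because applying the closure may
// itself cause further allocation in this space (e.g. objects being copied
// here during a young collection), so newly appended objects are scanned too
// before the saved mark is advanced to the final top().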

#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP