/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/space.hpp"

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future some lock-free manner of keeping the
  // coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
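// Usage sketch (illustrative only; "lab_word_size" and the fallback policy
// are assumptions, not part of this file): a GC worker might carve out a
// thread-local allocation buffer like
//
//   HeapWord* buf = old_space->par_allocate(lab_word_size);
//   if (buf == NULL) {
//     // Space exhausted: par_allocate returns NULL rather than waiting
//     // for room, so the caller must take its slow path (e.g. expand).
//   }
//
// Because every call takes _par_alloc_lock, per-object calls would
// serialize badly; amortize the lock over LAB-sized requests.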
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

#if INCLUDE_SERIALGC

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }

  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = cast_to_oop(dead_start);
      obj->set_mark(obj->mark().set_marked());

      assert(dead_length == obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }
};
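// Rough sketch of how a mark-compact forwarding pass might drive the
// DeadSpacer above (illustrative; the real driver lives in the serial GC
// compaction code, and the surrounding forwarding bookkeeping is elided):
//
//   DeadSpacer dead_spacer(space);
//   ...
//   // On finding a run of dead words [dead_start, dead_end):
//   if (dead_spacer.insert_deadspace(dead_start, dead_end)) {
//     // The gap was overwritten with a marked filler object and the
//     // preceding live prefix stays in place.
//   } else {
//     // Budget exhausted: later live objects compact over this gap.
//   }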
#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
    // We have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* prev_obj = NULL;

    while (cur_obj < space->_first_dead) {
      size_t size = cast_to_oop(cur_obj)->size();
      assert(!cast_to_oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_obj = cur_obj;
      cur_obj += size;
    }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Remember whether we were empty before the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete.
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes. For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized. Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}
#endif // INCLUDE_SERIALGC

template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = cast_to_oop(p);
      p += m->oop_iterate_size(blk);
    }
    // Applying the closure can allocate into this space (e.g. by promoting
    // objects here), which advances top(); loop until top() is stable.
  } while (t < top());

  set_saved_mark_word(p);
}

#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP
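// Usage sketch for oop_since_save_marks_iterate (illustrative; the closure
// below is an assumption -- any closure exposing the do_oop overloads that
// oop_iterate_size() dispatches to would do):
//
//   class CountOopsClosure : public BasicOopIterateClosure {
//    public:
//     size_t _count;
//     CountOopsClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//   };
//
//   space->set_saved_mark();                   // remember the current top()
//   /* ... objects are allocated or promoted into the space ... */
//   CountOopsClosure cl;
//   space->oop_since_save_marks_iterate(&cl);  // visits only the new objects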