/*
 * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP

#include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

template <class T>
void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(NULL, obj);
  shenandoah_assert_marked(NULL, obj);
  shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc());

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      obj->oop_iterate(cl);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // we skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    count_liveness(live_data, obj);
  } else {
    // Case 4: Array chunk, has a sensible chunk id. Process it.
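    // For illustration (this mirrors the arithmetic in do_chunked_array() below):
    // a chunked task (array, chunk, pow) covers the element range
    // [(chunk - 1) << pow, chunk << pow).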
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
  }
}

inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
  size_t region_idx = _heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* region = _heap->get_region(region_idx);
  size_t size = obj->size();

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // Overflow: flush to region data.
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // Still good: remember in locals.
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(NULL, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = _heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  if (len <= (int) ObjArrayMarkingStride*2) {
    // Only a few slices, process directly.
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2_long(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix into full-sized chunks and submit them to the queue.
    // If the array does not divide evenly into chunks, there will be an irregular tail,
    // which we process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
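    //
    // For illustration only, assuming ObjArrayMarkingStride == 2048 (an assumed value,
    // not necessarily the configured one) and len == 5000: bits becomes 13, so the
    // power-of-two cover is [0, 8192). The first split pushes (array, 1, 12), i.e.
    // elements [0, 4096), and records last_idx == 4096. The next left chunk would cover
    // [4096, 6144), which ends past len, so nothing more is pushed and the loop soon
    // terminates; the irregular tail [4096, 5000) is then scanned inline below.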
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present.
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, err_msg("from is sane: %d/%d", from, len));
  assert (0 < to && to <= len, err_msg("to is sane: %d/%d", to, len));
#endif

  array->oop_iterate_range(cl, from, to);
}

class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahStrDedupQueue* _dedup_queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) :
    _queue(q),
    _dedup_queue(dq),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here");
    if (ShenandoahStringDedup::is_enabled()) {
      do_buffer_impl<ENQUEUE_DEDUP>(buffer, size);
    } else {
      do_buffer_impl<NO_DEDUP>(buffer, size);
    }
  }

  template<StringDedupMode STRING_DEDUP>
  void do_buffer_impl(void **buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahConcurrentMark::mark_through_ref<oop, NONE, STRING_DEDUP>(p, _heap, _queue, _mark_context, _dedup_queue);
    }
  }
};

template<class T, UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, ShenandoahStrDedupQueue* dq) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    switch (UPDATE_REFS) {
    case NONE:
      break;
    case RESOLVE:
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      break;
    case SIMPLE:
      // We piggy-back reference updating onto the marking tasks.
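      // (Unlike CONCURRENT below, this update is unconditional: it presumes nothing races
      // with us on this slot, whereas maybe_update_with_forwarded_not_null must tolerate
      // mutators changing the slot underneath us; see the note further down.)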
      obj = heap->update_with_forwarded_not_null(p, obj);
      break;
    case CONCURRENT:
      obj = heap->maybe_update_with_forwarded_not_null(p, obj);
      break;
    default:
      ShouldNotReachHere();
    }

    // Note: Only when concurrently updating references can obj be different
    // (that is, really different, not just different from-/to-space copies of the same object)
    // from the one we originally loaded. A mutator thread can beat us by writing something
    // else into the location. In that case, we would mark through that updated value,
    // on the off-chance it is not handled by other means (e.g. via SATB). However,
    // if that write was NULL, we don't need to do anything else.
    if (UPDATE_REFS != CONCURRENT || !oopDesc::is_null(obj)) {
      shenandoah_assert_not_forwarded(p, obj);
      shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());

      if (mark_context->mark(obj)) {
        bool pushed = q->push(ShenandoahMarkTask(obj));
        assert(pushed, "overflow queue should always succeed pushing");

        if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) {
          assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
          assert(dq != NULL, "Dedup queue not set");
          ShenandoahStringDedup::enqueue_candidate(obj, dq);
        }
      }

      shenandoah_assert_marked(p, obj);
    }
  }
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP