/*
 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahMark.hpp"

#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/powerOfTwo.hpp"

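// Hand a freshly marked object to the String deduplication subsystem, depending
// on the STRING_DEDUP mode: ENQUEUE_DEDUP enqueues objects that pass the regular
// candidate test, ALWAYS_DEDUP enqueues any String that has not been requested
// for deduplication yet, and any other mode (NO_DEDUP) makes this a no-op.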
template <StringDedupMode STRING_DEDUP>
void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
  if (STRING_DEDUP == ENQUEUE_DEDUP) {
    if (ShenandoahStringDedup::is_candidate(obj)) {
      req->add(obj);
    }
  } else if (STRING_DEDUP == ALWAYS_DEDUP) {
    if (ShenandoahStringDedup::is_string_candidate(obj) &&
        !ShenandoahStringDedup::dedup_requested(obj)) {
      req->add(obj);
    }
  }
}

template <class T, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

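  // A task either references a whole object (not chunked), or one chunk of a
  // large object array, identified by its chunk id and power-of-two chunk size.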
  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
        // Loom doesn't support mixing of weak marking and strong marking of
        // stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak as TypeArrayKlass::oop_oop_iterate_impl:
      // we skip iterating over the klass pointer, since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    // Avoid double-counting objects that are visited twice due to upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness(live_data, obj);
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}

inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* region = heap->get_region(region_idx);
  size_t size = obj->size();

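  // live_data is the caller-provided array of narrow per-region counters
  // (typically one such array per marking worker). Liveness is accumulated
  // there and only flushed into the region's global counter when the narrow
  // counter is about to overflow; see below.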
  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
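    // Humongous object: it spans one "start" region and possibly several
    // continuation regions. Account the full used() size of every spanned
    // region as live right away, instead of going through the live_data cache.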
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix into full-sized chunks and submit them on the queue.
    // If the array does not divide evenly into chunks, there is an irregular tail,
    // which we process separately.
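    //
    // For example (hypothetical numbers): with len == 1000 and ObjArrayMarkingStride == 128,
    // bits becomes 10 (512 < 1000 <= 1024). The loop below then pushes chunk tasks that
    // cover [0, 512), [512, 768) and [768, 896), and the irregular tail [896, 1000) is
    // iterated directly at the end of this method. The pushed chunks may be split further
    // in do_chunked_array() when they are popped from the queue.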

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow: for arrays longer than 2^30 elements, pow would be 31 and
    // (1 << pow) would overflow a signed int. Push [0, 2^30) as a single chunk
    // right away and continue splitting from there.
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

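  // A task (chunk, pow) covers array indices [(chunk - 1) * 2^pow, chunk * 2^pow).
  // The loop above keeps halving the chunk: it pushes the left half as
  // (2*chunk - 1, pow - 1) and continues with the right half (2*chunk, pow - 1),
  // until the remaining chunk is small enough to scan here directly.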
  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

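// Drains SATB (snapshot-at-the-beginning) buffers during concurrent marking:
// every recorded pre-write reference is marked and, if newly marked, pushed
// onto this worker's queue via ShenandoahMark::mark_through_ref().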
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahMark::mark_through_ref<oop>(p, _queue, _mark_context, false);
    }
  }
};

template<class T>
inline void ShenandoahMark::mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());

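    // Try to mark the object. A strong mark can upgrade a previous weak mark;
    // in that case was_upgraded (skip_live) is set, so the object's liveness
    // is not counted a second time when the pushed task is processed
    // (see the count_liveness() check in do_task()).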
    bool skip_live = false;
    bool marked;
    if (weak) {
      marked = mark_context->mark_weak(obj);
    } else {
      marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
    }
    if (marked) {
      bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    shenandoah_assert_marked(p, obj);
  }
}

ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}

ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP